diff --git a/som_out/bear/cfg.yaml b/som_out/bear/cfg.yaml
deleted file mode 100644
index 84685b3ebc27e3e8bcc672e059dd525a59e7bbe5..0000000000000000000000000000000000000000
--- a/som_out/bear/cfg.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-batch_size: 8
-data:
-  camera_type: droid_recon
-  depth_type: aligned_depth_anything
-  end: -1
-  image_type: JPEGImages
-  load_from_cache: false
-  mask_erosion_radius: 3
-  mask_type: Annotations
-  num_targets_per_frame: 4
-  res: 480p
-  root_dir: SOM_data_lcx/Davis_Data
-  scene_norm_dict: null
-  seq_name: bear
-  start: 0
-  track_2d_type: bootstapir
-loss:
-  w_depth_const: 0.1
-  w_depth_grad: 1
-  w_depth_reg: 0.5
-  w_mask: 1.0
-  w_rgb: 1.0
-  w_scale_var: 0.01
-  w_smooth_bases: 0.1
-  w_smooth_tracks: 2.0
-  w_track: 2.0
-  w_z_accel: 1.0
-lr:
-  bg:
-    colors: 0.01
-    means: 0.00016
-    opacities: 0.05
-    quats: 0.001
-    scales: 0.005
-  fg:
-    colors: 0.01
-    means: 0.00016
-    motion_coefs: 0.01
-    opacities: 0.01
-    quats: 0.001
-    scales: 0.005
-  motion_bases:
-    rots: 0.00016
-    transls: 0.00016
-num_bg: 100000
-num_dl_workers: 4
-num_epochs: 500
-num_fg: 40000
-num_motion_bases: 10
-optim:
-  control_every: 100
-  cull_opacity_threshold: 0.1
-  cull_scale_threshold: 0.5
-  cull_screen_threshold: 0.15
-  densify_scale_threshold: 0.01
-  densify_screen_threshold: 0.05
-  densify_xys_grad_threshold: 0.0002
-  max_steps: 5000
-  reset_opacity_every_n_controls: 30
-  stop_control_by_screen_steps: 4000
-  stop_control_steps: 4000
-  stop_densify_steps: 15000
-  warmup_steps: 200
-port: null
-save_videos_every: 50
-validate_every: 50
-vis_debug: false
-work_dir: SOM_output_lcx/Davis_out/bear
diff --git a/som_out/bear/checkpoints/last.ckpt b/som_out/bear/checkpoints/last.ckpt
deleted file mode 100644
index fd58f9c09d83f9bb22c6dd274b84e2ca64f06b5c..0000000000000000000000000000000000000000
--- a/som_out/bear/checkpoints/last.ckpt
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:dc6c1e90687f97e64dd6e436781493c0aba7dd6e726103926e3bb396b30a52ac
-size 138448040
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/__init__.py b/som_out/bear/code/2024-10-25-234902/flow3d/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/configs.cpython-310.pyc
deleted file mode 100644
index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/init_utils.cpython-310.pyc
deleted file mode 100644
index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/loss_utils.cpython-310.pyc
deleted file mode 100644
index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/metrics.cpython-310.pyc
deleted file mode 100644
index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/params.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/params.cpython-310.pyc
deleted file mode 100644
index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/scene_model.cpython-310.pyc
deleted file mode 100644
index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc
deleted file mode 100644
index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/trainer.cpython-310.pyc
deleted file mode 100644
index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/transforms.cpython-310.pyc
deleted file mode 100644
index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/validator.cpython-310.pyc
deleted file mode 100644
index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/configs.py b/som_out/bear/code/2024-10-25-234902/flow3d/configs.py
deleted file mode 100644
index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000
--- a/som_out/bear/code/2024-10-25-234902/flow3d/configs.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from dataclasses import dataclass
-
-
-@dataclass
-class FGLRConfig:
-    means: float = 1.6e-4
-    opacities: float = 1e-2
-    scales: float = 5e-3
-    quats: float = 1e-3
-    colors: float = 1e-2
-    motion_coefs: float = 1e-2
-
-
-@dataclass
-class BGLRConfig:
-    means: float = 1.6e-4
-    opacities: float = 5e-2
-    scales: float = 5e-3
-    quats: float = 1e-3
-    colors: float = 1e-2
-
-
-@dataclass
-class MotionLRConfig:
-    rots: float = 1.6e-4
-    transls: float = 1.6e-4
-
-
-@dataclass
-class SceneLRConfig:
-    fg: FGLRConfig
-    bg: BGLRConfig
-    motion_bases: MotionLRConfig
-
-
-@dataclass
-class LossesConfig:
-    w_rgb: float = 1.0
-    w_depth_reg: float = 0.5
-    w_depth_const: float = 0.1
-    w_depth_grad: float = 1
-    w_track: float = 2.0
-    w_mask: float = 1.0
-    w_smooth_bases: float = 0.1
-    w_smooth_tracks: float = 2.0
-    w_scale_var: float = 0.01
-    w_z_accel: float = 1.0
-
-
-@dataclass
-class OptimizerConfig:
-    max_steps: int = 5000
-    ## Adaptive gaussian control
-    warmup_steps: int = 200
-    control_every: int = 100
-    reset_opacity_every_n_controls: int = 30
-    stop_control_by_screen_steps: int = 4000
-    stop_control_steps: int = 4000
-    ### Densify.
-    densify_xys_grad_threshold: float = 0.0002
-    densify_scale_threshold: float = 0.01
-    densify_screen_threshold: float = 0.05
-    stop_densify_steps: int = 15000
-    ### Cull.
-    cull_opacity_threshold: float = 0.1
-    cull_scale_threshold: float = 0.5
-    cull_screen_threshold: float = 0.15
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/data/__init__.py b/som_out/bear/code/2024-10-25-234902/flow3d/data/__init__.py
deleted file mode 100644
index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000
--- a/som_out/bear/code/2024-10-25-234902/flow3d/data/__init__.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from dataclasses import asdict, replace
-
-from torch.utils.data import Dataset
-
-from .base_dataset import BaseDataset
-from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig
-from .iphone_dataset import (
-    iPhoneDataConfig,
-    iPhoneDataConfig_Crafter,
-    iPhoneDataset,
-    iPhoneDatasetKeypointView,
-    iPhoneDatasetVideoView,
-)
-
-
-def get_train_val_datasets(
-    data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool
-) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]:
-    train_video_view = None
-    val_img_dataset = None
-    val_kpt_dataset = None
-    if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter):
-        train_dataset = iPhoneDataset(**asdict(data_cfg))
-        train_video_view = iPhoneDatasetVideoView(train_dataset)
-        if load_val:
-            val_img_dataset = (
-                iPhoneDataset(
-                    **asdict(replace(data_cfg, split="val", load_from_cache=True))
-                )
-                if train_dataset.has_validation
-                else None
-            )
-            val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset)
-    elif isinstance(data_cfg, DavisDataConfig) or isinstance(
-        data_cfg, CustomDataConfig
-    ):
-        train_dataset = CasualDataset(**asdict(data_cfg))
-    else:
-        raise ValueError(f"Unknown data config: {data_cfg}")
-    return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/base_dataset.cpython-310.pyc
deleted file mode 100644
index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc
deleted file mode 100644
index 375f888d320380a5c331b6d14fff062e4925c21b..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/colmap.cpython-310.pyc
deleted file mode 100644
index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc
deleted file mode 100644
index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/utils.cpython-310.pyc
deleted file mode 100644
index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/data/base_dataset.py b/som_out/bear/code/2024-10-25-234902/flow3d/data/base_dataset.py
deleted file mode 100644
index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000
--- a/som_out/bear/code/2024-10-25-234902/flow3d/data/base_dataset.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from abc import abstractmethod
-
-import torch
-from torch.utils.data import Dataset, default_collate
-
-
-class BaseDataset(Dataset):
-    @property
-    @abstractmethod
-    def num_frames(self) -> int: ...
-
-    @property
-    def keyframe_idcs(self) -> torch.Tensor:
-        return torch.arange(self.num_frames)
-
-    @abstractmethod
-    def get_w2cs(self) -> torch.Tensor: ...
-
-    @abstractmethod
-    def get_Ks(self) -> torch.Tensor: ...
-
-    @abstractmethod
-    def get_image(self, index: int) -> torch.Tensor: ...
-
-    @abstractmethod
-    def get_depth(self, index: int) -> torch.Tensor: ...
-
-    @abstractmethod
-    def get_mask(self, index: int) -> torch.Tensor: ...
-
-    def get_img_wh(self) -> tuple[int, int]: ...
-
-    @abstractmethod
-    def get_tracks_3d(
-        self, num_samples: int, **kwargs
-    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
-        """
-        Returns 3D tracks:
-            coordinates (N, T, 3),
-            visibles (N, T),
-            invisibles (N, T),
-            confidences (N, T),
-            colors (N, 3)
-        """
-        ...
-
-    @abstractmethod
-    def get_bkgd_points(
-        self, num_samples: int, **kwargs
-    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
-        """
-        Returns background points:
-            coordinates (N, 3),
-            normals (N, 3),
-            colors (N, 3)
-        """
-        ...
-
-    @staticmethod
-    def train_collate_fn(batch):
-        collated = {}
-        for k in batch[0]:
-            if k not in [
-                "query_tracks_2d",
-                "target_ts",
-                "target_w2cs",
-                "target_Ks",
-                "target_tracks_2d",
-                "target_visibles",
-                "target_track_depths",
-                "target_invisibles",
-                "target_confidences",
-            ]:
-                collated[k] = default_collate([sample[k] for sample in batch])
-            else:
-                collated[k] = [sample[k] for sample in batch]
-        return collated
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/data/casual_dataset.py b/som_out/bear/code/2024-10-25-234902/flow3d/data/casual_dataset.py
deleted file mode 100644
index db367682ee99a59fd914240269d8f8e13e00bd1f..0000000000000000000000000000000000000000
--- a/som_out/bear/code/2024-10-25-234902/flow3d/data/casual_dataset.py
+++ /dev/null
@@ -1,498 +0,0 @@
-import os
-from dataclasses import dataclass
-from functools import partial
-from typing import Literal, cast
-
-import cv2
-import imageio
-import numpy as np
-import torch
-import torch.nn.functional as F
-import tyro
-from loguru import logger as guru
-from roma import roma
-from tqdm import tqdm
-
-from flow3d.data.base_dataset import BaseDataset
-from flow3d.data.utils import (
-    UINT16_MAX,
-    SceneNormDict,
-    get_tracks_3d_for_query_frame,
-    median_filter_2d,
-    normal_from_depth_image,
-    normalize_coords,
-    parse_tapir_track_info,
-)
-from flow3d.transforms import rt_to_mat4
-
-
-@dataclass
-class DavisDataConfig:
-    seq_name: str
-    root_dir: str
-    start: int = 0
-    end: int = -1
-    res: str = "480p"
-    image_type: str = "JPEGImages"
-    mask_type: str = "Annotations"
-    depth_type: Literal[
-        "aligned_depth_anything",
-        "aligned_depth_anything_v2",
-        "depth_anything",
-        "depth_anything_v2",
-        "unidepth_disp",
-        "aligned_depthcrafter",
-    ] = "aligned_depthcrafter"
-    camera_type: Literal["droid_recon"] = "droid_recon"
-    track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir"
-    mask_erosion_radius: int = 3
-    scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None
-    num_targets_per_frame: int = 4
-    load_from_cache: bool = False
-
-
-@dataclass
-class CustomDataConfig:
-    seq_name: str
-    root_dir: str
-    start: int = 0
-    end: int = -1
-    res: str = ""
-    image_type: str = "images"
-    mask_type: str = "masks"
-    depth_type: Literal[
-        "aligned_depth_anything",
-        "aligned_depth_anything_v2",
-        "depth_anything",
-        "depth_anything_v2",
-        "unidepth_disp",
-    ] = "aligned_depth_anything"
-    camera_type: Literal["droid_recon"] = "droid_recon"
-    track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir"
-    mask_erosion_radius: int = 7
-    scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None
-    num_targets_per_frame: int = 4
-    load_from_cache: bool = False
-
-
-class CasualDataset(BaseDataset):
-    def __init__(
-        self,
-        seq_name: str,
-        root_dir: str,
-        start: int = 0,
-        end: int = -1,
-        res: str = "480p",
-        image_type: str = "JPEGImages",
-        mask_type: str = "Annotations",
-        depth_type: Literal[
-            "aligned_depth_anything",
-            "aligned_depth_anything_v2",
-            "depth_anything",
"depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = depth_type - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> 
-        if self.imgs[index] is None:
-            self.imgs[index] = self.load_image(index)
-        img = cast(torch.Tensor, self.imgs[index])
-        return img
-
-    def get_mask(self, index) -> torch.Tensor:
-        if self.masks[index] is None:
-            self.masks[index] = self.load_mask(index)
-        mask = cast(torch.Tensor, self.masks[index])
-        return mask
-
-    def get_depth(self, index) -> torch.Tensor:
-        if self.depths[index] is None:
-            self.depths[index] = self.load_depth(index)
-        return self.depths[index] / self.scale
-
-    def load_image(self, index) -> torch.Tensor:
-        path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}"
-        return torch.from_numpy(imageio.imread(path)).float() / 255.0
-
-    def load_mask(self, index) -> torch.Tensor:
-        path = f"{self.mask_dir}/{self.frame_names[index]}.png"
-        r = self.mask_erosion_radius
-        mask = imageio.imread(path)
-        fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0
-        bg_mask = ~fg_mask
-        fg_mask_erode = cv2.erode(
-            fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1
-        )
-        bg_mask_erode = cv2.erode(
-            bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1
-        )
-        out_mask = np.zeros_like(fg_mask, dtype=np.float32)
-        out_mask[bg_mask_erode > 0] = -1
-        out_mask[fg_mask_erode > 0] = 1
-        return torch.from_numpy(out_mask).float()
-
-    def load_depth(self, index) -> torch.Tensor:
-        path = f"{self.depth_dir}/{self.frame_names[index]}.npy"
-        disp = np.load(path)
-        depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6)
-        depth = torch.from_numpy(depth).float()
-        depth = median_filter_2d(depth[None, None], 11, 1)[0, 0]
-        return depth
-
-    def load_target_tracks(
-        self, query_index: int, target_indices: list[int], dim: int = 1
-    ):
-        """
-        tracks are 2d, occs and uncertainties
-        :param dim (int), default 1: dimension to stack the time axis
-        return (N, T, 4) if dim=1, (T, N, 4) if dim=0
-        """
-        q_name = self.frame_names[query_index]
-        all_tracks = []
-        for ti in target_indices:
-            t_name = self.frame_names[ti]
-            path = f"{self.tracks_dir}/{q_name}_{t_name}.npy"
-            tracks = np.load(path).astype(np.float32)
-            all_tracks.append(tracks)
-        return torch.from_numpy(np.stack(all_tracks, axis=dim))
-
-    def get_tracks_3d(
-        self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs
-    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
-        num_frames = self.num_frames
-        if end < 0:
-            end = num_frames + 1 + end
-        query_idcs = list(range(start, end, step))
-        target_idcs = list(range(start, end, step))
-        masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0)
-        fg_masks = (masks == 1).float()
-        depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0)
-        inv_Ks = torch.linalg.inv(self.Ks[target_idcs])
-        c2ws = torch.linalg.inv(self.w2cs[target_idcs])
-
-        num_per_query_frame = int(np.ceil(num_samples / len(query_idcs)))
-        cur_num = 0
-        tracks_all_queries = []
-        for q_idx in query_idcs:
-            # (N, T, 4)
-            tracks_2d = self.load_target_tracks(q_idx, target_idcs)
-            num_sel = int(
-                min(num_per_query_frame, num_samples - cur_num, len(tracks_2d))
-            )
-            if num_sel < len(tracks_2d):
-                sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False)
-                tracks_2d = tracks_2d[sel_idcs]
-            cur_num += tracks_2d.shape[0]
-            img = self.get_image(q_idx)
-            tidx = target_idcs.index(q_idx)
-            tracks_tuple = get_tracks_3d_for_query_frame(
-                tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws
-            )
-            tracks_all_queries.append(tracks_tuple)
-        tracks_3d, colors, visibles, invisibles, confidences = map(
-            partial(torch.cat, dim=0), zip(*tracks_all_queries)
-        )
-        return tracks_3d, visibles, invisibles, confidences, colors
-
-    def get_bkgd_points(
-        self,
-        num_samples: int,
-        use_kf_tstamps: bool = True,
-        stride: int = 8,
-        down_rate: int = 8,
-        min_per_frame: int = 64,
-        **kwargs,
-    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
-        start = 0
-        end = self.num_frames
-        H, W = self.get_image(0).shape[:2]
-        grid = torch.stack(
-            torch.meshgrid(
-                torch.arange(0, W, dtype=torch.float32),
-                torch.arange(0, H, dtype=torch.float32),
-                indexing="xy",
-            ),
-            dim=-1,
-        )
-
-        if use_kf_tstamps:
-            query_idcs = self.keyframe_idcs.tolist()
-        else:
-            num_query_frames = self.num_frames // stride
-            query_endpts = torch.linspace(start, end, num_query_frames + 1)
-            query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist()
-
-        bg_geometry = []
-        print(f"{query_idcs=}")
-        for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False):
-            img = self.get_image(query_idx)
-            depth = self.get_depth(query_idx)
-            bg_mask = self.get_mask(query_idx) < 0
-            bool_mask = (bg_mask * (depth > 0)).to(torch.bool)
-            w2c = self.w2cs[query_idx]
-            K = self.Ks[query_idx]
-
-            # get the bounding box of previous points that reproject into frame
-            # inefficient but works for now
-            bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H
-            for p3d, _, _ in bg_geometry:
-                if len(p3d) < 1:
-                    continue
-                # reproject into current frame
-                p2d = torch.einsum(
-                    "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0)
-                )
-                p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6)
-                xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item()
-                ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item()
-
-                bmin_x = min(bmin_x, int(xmin))
-                bmin_y = min(bmin_y, int(ymin))
-                bmax_x = max(bmax_x, int(xmax))
-                bmax_y = max(bmax_y, int(ymax))
-
-            # don't include points that are covered by previous points
-            bmin_x = max(0, bmin_x)
-            bmin_y = max(0, bmin_y)
-            bmax_x = min(W, bmax_x)
-            bmax_y = min(H, bmax_y)
-            overlap_mask = torch.ones_like(bool_mask)
-            overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0
-
-            bool_mask &= overlap_mask
-            if bool_mask.sum() < min_per_frame:
-                guru.debug(f"skipping {query_idx=}")
-                continue
-
-            points = (
-                torch.einsum(
-                    "ij,pj->pi",
-                    torch.linalg.inv(K),
-                    F.pad(grid[bool_mask], (0, 1), value=1.0),
-                )
-                * depth[bool_mask][:, None]
-            )
-            points = torch.einsum(
-                "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0)
-            )
-            point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask]
-            point_colors = img[bool_mask]
-
-            num_sel = max(len(points) // down_rate, min_per_frame)
-            sel_idcs = np.random.choice(len(points), num_sel, replace=False)
-            points = points[sel_idcs]
-            point_normals = point_normals[sel_idcs]
-            point_colors = point_colors[sel_idcs]
-            guru.debug(f"{query_idx=} {points.shape=}")
-            bg_geometry.append((points, point_normals, point_colors))
-
-        bg_points, bg_normals, bg_colors = map(
-            partial(torch.cat, dim=0), zip(*bg_geometry)
-        )
-        if len(bg_points) > num_samples:
-            sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False)
-            bg_points = bg_points[sel_idcs]
-            bg_normals = bg_normals[sel_idcs]
-            bg_colors = bg_colors[sel_idcs]
-
-        return bg_points, bg_normals, bg_colors
-
-    def __getitem__(self, index: int):
-        index = np.random.randint(0, self.num_frames)
-        data = {
-            # ().
-            "frame_names": self.frame_names[index],
-            # ().
-            "ts": torch.tensor(index),
-            # (4, 4).
-            "w2cs": self.w2cs[index],
-            # (3, 3).
-            "Ks": self.Ks[index],
-            # (H, W, 3).
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/data/colmap.py b/som_out/bear/code/2024-10-25-234902/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = {v.name: k for k, v in 
-    img_names = [os.path.basename(img_file) for img_file in img_files]
-    num_imgs = len(img_names)
-    K_all = np.zeros((num_imgs, 4, 4))
-    extrinsics_all = np.zeros((num_imgs, 4, 4))
-    for idx, name in enumerate(img_names):
-        key = colmap_image_idcs[name]
-        image = images[key]
-        assert image.name == name
-        K, extrinsics = get_intrinsics_extrinsics(image, cameras)
-        K_all[idx] = K
-        extrinsics_all[idx] = extrinsics
-
-    return K_all, extrinsics_all
-
-
-@dataclass(frozen=True)
-class CameraModel:
-    model_id: int
-    model_name: str
-    num_params: int
-
-
-@dataclass(frozen=True)
-class Camera:
-    id: int
-    model: str
-    width: int
-    height: int
-    params: np.ndarray
-
-
-@dataclass(frozen=True)
-class BaseImage:
-    id: int
-    qvec: np.ndarray
-    tvec: np.ndarray
-    camera_id: int
-    name: str
-    xys: np.ndarray
-    point3D_ids: np.ndarray
-
-
-@dataclass(frozen=True)
-class Point3D:
-    id: int
-    xyz: np.ndarray
-    rgb: np.ndarray
-    error: Union[float, np.ndarray]
-    image_ids: np.ndarray
-    point2D_idxs: np.ndarray
-
-
-class Image(BaseImage):
-    def qvec2rotmat(self):
-        return qvec2rotmat(self.qvec)
-
-
-CAMERA_MODELS = {
-    CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3),
-    CameraModel(model_id=1, model_name="PINHOLE", num_params=4),
-    CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4),
-    CameraModel(model_id=3, model_name="RADIAL", num_params=5),
-    CameraModel(model_id=4, model_name="OPENCV", num_params=8),
-    CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8),
-    CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12),
-    CameraModel(model_id=7, model_name="FOV", num_params=5),
-    CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4),
-    CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5),
-    CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12),
-}
-CAMERA_MODEL_IDS = dict(
-    [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS]
-)
-
-
-def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
-    """Read and unpack the next bytes from a binary file.
-    :param fid:
-    :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
-    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
-    :param endian_character: Any of {@, =, <, >, !}
-    :return: Tuple of read and unpacked values.
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
-            current_char = read_next_bytes(fid, 1, "c")[0]
-            while current_char != b"\x00":  # look for the ASCII 0 entry
-                image_name += current_char.decode("utf-8")
-                current_char = read_next_bytes(fid, 1, "c")[0]
-            num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[
-                0
-            ]
-            x_y_id_s = read_next_bytes(
-                fid,
-                num_bytes=24 * num_points2D,
-                format_char_sequence="ddq" * num_points2D,
-            )
-            xys = np.column_stack(
-                [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))]
-            )
-            point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
-            images[image_id] = Image(
-                id=image_id,
-                qvec=qvec,
-                tvec=tvec,
-                camera_id=camera_id,
-                name=image_name,
-                xys=xys,
-                point3D_ids=point3D_ids,
-            )
-    return images
-
-
-def read_points3D_text(path: Union[str, Path]):
-    """
-    see: src/base/reconstruction.cc
-        void Reconstruction::ReadPoints3DText(const std::string& path)
-        void Reconstruction::WritePoints3DText(const std::string& path)
-    """
-    points3D = {}
-    with open(path, "r") as fid:
-        while True:
-            line = fid.readline()
-            if not line:
-                break
-            line = line.strip()
-            if len(line) > 0 and line[0] != "#":
-                elems = line.split()
-                point3D_id = int(elems[0])
-                xyz = np.array(tuple(map(float, elems[1:4])))
-                rgb = np.array(tuple(map(int, elems[4:7])))
-                error = float(elems[7])
-                image_ids = np.array(tuple(map(int, elems[8::2])))
-                point2D_idxs = np.array(tuple(map(int, elems[9::2])))
-                points3D[point3D_id] = Point3D(
-                    id=point3D_id,
-                    xyz=xyz,
-                    rgb=rgb,
-                    error=error,
-                    image_ids=image_ids,
-                    point2D_idxs=point2D_idxs,
-                )
-    return points3D
-
-
-def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]:
-    """
-    see: src/base/reconstruction.cc
-        void Reconstruction::ReadPoints3DBinary(const std::string& path)
-        void Reconstruction::WritePoints3DBinary(const std::string& path)
-    """
-    points3D = {}
-    with open(path_to_model_file, "rb") as fid:
-        num_points = read_next_bytes(fid, 8, "Q")[0]
-        for point_line_index in range(num_points):
-            binary_point_line_properties = read_next_bytes(
-                fid, num_bytes=43, format_char_sequence="QdddBBBd"
-            )
-            point3D_id = binary_point_line_properties[0]
-            xyz = np.array(binary_point_line_properties[1:4])
-            rgb = np.array(binary_point_line_properties[4:7])
-            error = np.array(binary_point_line_properties[7])
-            track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[
-                0
-            ]
-            track_elems = read_next_bytes(
-                fid,
-                num_bytes=8 * track_length,
-                format_char_sequence="ii" * track_length,
-            )
-            image_ids = np.array(tuple(map(int, track_elems[0::2])))
-            point2D_idxs = np.array(tuple(map(int, track_elems[1::2])))
-            points3D[point3D_id] = Point3D(
-                id=point3D_id,
-                xyz=xyz,
-                rgb=rgb,
-                error=error,
-                image_ids=image_ids,
-                point2D_idxs=point2D_idxs,
-            )
-    return points3D
-
-
-def qvec2rotmat(qvec):
-    return np.array(
-        [
-            [
-                1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2,
-                2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
-                2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2],
-            ],
-            [
-                2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
-                1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2,
-                2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1],
-            ],
-            [
-                2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
-                2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
-                1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2,
-            ],
-        ]
-    )
-
-
-def get_intrinsics_extrinsics(img, cameras):
-    # world to cam transformation
-    R = qvec2rotmat(img.qvec)
-    # translation
-    t = img.tvec
-    cam = cameras[img.camera_id]
-
-    if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"):
-        fx = fy = cam.params[0]
-        cx = cam.params[1]
-        cy = cam.params[2]
-    elif cam.model in (
-        "PINHOLE",
-        "OPENCV",
-        "OPENCV_FISHEYE",
-        "FULL_OPENCV",
-    ):
-        fx = cam.params[0]
-        fy = cam.params[1]
-        cx = cam.params[2]
-        cy = cam.params[3]
-    else:
-        raise Exception("Camera model not supported")
-
-    # intrinsics
-    K = np.identity(4)
-    K[0, 0] = fx
-    K[1, 1] = fy
-    K[0, 2] = cx
-    K[1, 2] = cy
-
-    extrinsics = np.eye(4)
-    extrinsics[:3, :3] = R
-    extrinsics[:3, 3] = t
-    return K, extrinsics
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/data/iphone_dataset.py b/som_out/bear/code/2024-10-25-234902/flow3d/data/iphone_dataset.py
deleted file mode 100644
index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000
--- a/som_out/bear/code/2024-10-25-234902/flow3d/data/iphone_dataset.py
+++ /dev/null
@@ -1,865 +0,0 @@
-import json
-import os
-import os.path as osp
-from dataclasses import dataclass
-from glob import glob
-from itertools import product
-from typing import Literal
-
-import imageio.v3 as iio
-import numpy as np
-import roma
-import torch
-import torch.nn.functional as F
-import tyro
-from loguru import logger as guru
-from torch.utils.data import Dataset
-from tqdm import tqdm
-
-from flow3d.data.base_dataset import BaseDataset
-from flow3d.data.colmap import get_colmap_camera_params
-from flow3d.data.utils import (
-    SceneNormDict,
-    masked_median_blur,
-    normal_from_depth_image,
-    normalize_coords,
-    parse_tapir_track_info,
-)
-from flow3d.transforms import rt_to_mat4
-
-
-@dataclass
-class iPhoneDataConfig:
-    data_dir: str
-    start: int = 0
-    end: int = -1
-    split: Literal["train", "val"] = "train"
-    depth_type: Literal[
-        "midas",
-        "depth_anything",
-        "lidar",
-        "depth_anything_colmap",
-        "depth_crafter_colmap",
-    ] = "depth_anything_colmap"
-    camera_type: Literal["original", "refined"] = "refined"
-    use_median_filter: bool = False
-    num_targets_per_frame: int = 4
-    scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None
-    load_from_cache: bool = False
-    skip_load_imgs: bool = False
-
-
-@dataclass
-class iPhoneDataConfig_Crafter:
-    data_dir: str
-    start: int = 0
-    end: int = -1
-    split: Literal["train", "val"] = "train"
-    depth_type: Literal[
-        "midas",
-        "depth_anything",
-        "lidar",
-        "depth_anything_colmap",
-        "depth_crafter_colmap",
-    ] = "depth_crafter_colmap"
-    camera_type: Literal["original", "refined"] = "refined"
-    use_median_filter: bool = False
-    num_targets_per_frame: int = 4
-    scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None
-    load_from_cache: bool = False
-    skip_load_imgs: bool = False
-
-
-class iPhoneDataset(BaseDataset):
-    def __init__(
-        self,
-        data_dir: str,
-        start: int = 0,
-        end: int = -1,
-        factor: int = 1,
-        split: Literal["train", "val"] = "train",
-        depth_type: Literal[
-            "midas",
-            "depth_anything",
-            "lidar",
-            "depth_anything_colmap",
-            "depth_crafter_colmap",
-        ] = "depth_crafter_colmap",
-        camera_type: Literal["original", "refined"] = "refined",
-        use_median_filter: bool = False,
-        num_targets_per_frame: int = 1,
-        scene_norm_dict: SceneNormDict | None = None,
-        load_from_cache: bool = False,
-        skip_load_imgs: bool = False,
-        **_,
-    ):
-        super().__init__()
-
-        self.data_dir = data_dir
-        self.training = split == "train"
-        self.split = split
-        self.factor = factor
-        self.start = start
-        self.end = end
-        self.depth_type = depth_type
-        self.camera_type = camera_type
-        self.use_median_filter = use_median_filter
-        self.num_targets_per_frame = num_targets_per_frame
-        self.scene_norm_dict = scene_norm_dict
-        self.load_from_cache = load_from_cache
-        self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache")
-        os.makedirs(self.cache_dir, exist_ok=True)
-
-        print("!!!!depth_type!!!", depth_type)
-
-        # Test if the current data has validation set.
-        with open(osp.join(data_dir, "splits", "val.json")) as f:
-            split_dict = json.load(f)
-        self.has_validation = len(split_dict["frame_names"]) > 0
-
-        # Load metadata.
-        with open(osp.join(data_dir, "splits", f"{split}.json")) as f:
-            split_dict = json.load(f)
-        full_len = len(split_dict["frame_names"])
-        end = min(end, full_len) if end > 0 else full_len
-        self.end = end
-        self.frame_names = split_dict["frame_names"][start:end]
-        time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end]
-        self.time_ids = torch.tensor(time_ids) - start
-        guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}")
-        # with open(osp.join(data_dir, "dataset.json")) as f:
-        #     dataset_dict = json.load(f)
-        # self.num_frames = dataset_dict["num_exemplars"]
-        guru.info(f"{self.num_frames=}")
-        with open(osp.join(data_dir, "extra.json")) as f:
-            extra_dict = json.load(f)
-        self.fps = float(extra_dict["fps"])
-
-        # Load cameras.
-        if self.camera_type == "original":
-            Ks, w2cs = [], []
-            for frame_name in self.frame_names:
-                with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f:
-                    camera_dict = json.load(f)
-                focal_length = camera_dict["focal_length"]
-                principal_point = camera_dict["principal_point"]
-                Ks.append(
-                    [
-                        [focal_length, 0.0, principal_point[0]],
-                        [0.0, focal_length, principal_point[1]],
-                        [0.0, 0.0, 1.0],
-                    ]
-                )
-                orientation = np.array(camera_dict["orientation"])
-                position = np.array(camera_dict["position"])
-                w2cs.append(
-                    np.block(
-                        [
-                            [orientation, -orientation @ position[:, None]],
-                            [np.zeros((1, 3)), np.ones((1, 1))],
-                        ]
-                    ).astype(np.float32)
-                )
-            self.Ks = torch.tensor(Ks)
-            self.Ks[:, :2] /= factor
-            self.w2cs = torch.from_numpy(np.array(w2cs))
-        elif self.camera_type == "refined":
-            Ks, w2cs = get_colmap_camera_params(
-                osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"),
-                [frame_name + ".png" for frame_name in self.frame_names],
-            )
-            self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32))
-            self.Ks[:, :2] /= factor
-            self.w2cs = torch.from_numpy(w2cs.astype(np.float32))
-        if not skip_load_imgs:
-            # Load images.
-            imgs = torch.from_numpy(
-                np.array(
-                    [
-                        iio.imread(
-                            osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png")
-                        )
-                        for frame_name in tqdm(
-                            self.frame_names,
-                            desc=f"Loading {self.split} images",
-                            leave=False,
-                        )
-                    ],
-                )
-            )
-            self.imgs = imgs[..., :3] / 255.0
-            self.valid_masks = imgs[..., 3] / 255.0
-            # Load masks.
-            self.masks = (
-                torch.from_numpy(
-                    np.array(
-                        [
-                            iio.imread(
-                                osp.join(
-                                    self.data_dir,
-                                    "flow3d_preprocessed/track_anything/",
-                                    f"{factor}x/{frame_name}.png",
-                                )
-                            )
-                            for frame_name in tqdm(
-                                self.frame_names,
-                                desc=f"Loading {self.split} masks",
-                                leave=False,
-                            )
-                        ],
-                    )
-                )
-                / 255.0
-            )
-            if self.training:
-                # Load depths.
-                def load_depth(frame_name):
-                    if self.depth_type == "lidar":
-                        depth = np.load(
-                            osp.join(
-                                self.data_dir,
-                                f"depth/{factor}x/{frame_name}.npy",
-                            )
-                        )[..., 0]
-                    else:
-                        depth = np.load(
-                            osp.join(
-                                self.data_dir,
-                                # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/",
-                                # TODO: 1023
-                                f"flow3d_preprocessed/aligned_{self.depth_type}/",
-                                # f"flow3d_preprocessed/noaligned_{self.depth_type}/",
-                                f"{factor}x/{frame_name}.npy",
-                            )
-                        )
-                        depth[depth < 1e-3] = 1e-3
-                        depth = 1.0 / depth
-                    return depth
-
-                self.depths = torch.from_numpy(
-                    np.array(
-                        [
-                            load_depth(frame_name)
-                            for frame_name in tqdm(
-                                self.frame_names,
-                                desc=f"Loading {self.split} depths",
-                                leave=False,
-                            )
-                        ],
-                        np.float32,
-                    )
-                )
-                max_depth_values_per_frame = self.depths.reshape(
-                    self.num_frames, -1
-                ).max(1)[0]
-                max_depth_value = max_depth_values_per_frame.median() * 2.5
-                print("max_depth_value", max_depth_value)
-                self.depths = torch.clamp(self.depths, 0, max_depth_value)
-                # Median filter depths.
-                # NOTE(hangg): This operator is very expensive.
-                if self.use_median_filter:
-                    for i in tqdm(
-                        range(self.num_frames), desc="Processing depths", leave=False
-                    ):
-                        depth = masked_median_blur(
-                            self.depths[[i]].unsqueeze(1).to("cuda"),
-                            (
-                                self.masks[[i]]
-                                * self.valid_masks[[i]]
-                                * (self.depths[[i]] > 0)
-                            )
-                            .unsqueeze(1)
-                            .to("cuda"),
-                        )[0, 0].cpu()
-                        self.depths[i] = depth * self.masks[i] + self.depths[i] * (
-                            1 - self.masks[i]
-                        )
-                # Load the query pixels from 2D tracks.
-                self.query_tracks_2d = [
-                    torch.from_numpy(
-                        np.load(
-                            osp.join(
-                                self.data_dir,
-                                "flow3d_preprocessed/2d_tracks/",
-                                f"{factor}x/{frame_name}_{frame_name}.npy",
-                            )
-                        ).astype(np.float32)
-                    )
-                    for frame_name in self.frame_names
-                ]
-                guru.info(
-                    f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}"
-                )
-
-                # Load sam features.
-                # sam_feat_dir = osp.join(
-                #     data_dir, f"flow3d_preprocessed/sam_features/{factor}x"
-                # )
-                # assert osp.exists(sam_feat_dir), f"SAM features not exist!"
-                # sam_features, original_size, input_size = load_sam_features(
-                #     sam_feat_dir, self.frame_names
-                # )
-                # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}")
-                # self.sam_features = sam_features
-                # self.sam_original_size = original_size
-                # self.sam_input_size = input_size
-            else:
-                # Load covisible masks.
-                self.covisible_masks = (
-                    torch.from_numpy(
-                        np.array(
-                            [
-                                iio.imread(
-                                    osp.join(
-                                        self.data_dir,
-                                        "flow3d_preprocessed/covisible/",
-                                        f"{factor}x/{split}/{frame_name}.png",
-                                    )
-                                )
-                                for frame_name in tqdm(
-                                    self.frame_names,
-                                    desc=f"Loading {self.split} covisible masks",
-                                    leave=False,
-                                )
-                            ],
-                        )
-                    )
-                    / 255.0
-                )
-
-        if self.scene_norm_dict is None:
-            cached_scene_norm_dict_path = osp.join(
-                self.cache_dir, "scene_norm_dict.pth"
-            )
-            if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache:
-                print("loading cached scene norm dict...")
-                self.scene_norm_dict = torch.load(
-                    osp.join(self.cache_dir, "scene_norm_dict.pth")
-                )
-            elif self.training:
-                # Compute the scene scale and transform for normalization.
-                # Normalize the scene based on the foreground 3D tracks.
-                subsampled_tracks_3d = self.get_tracks_3d(
-                    num_samples=10000, step=self.num_frames // 10, show_pbar=False
-                )[0]
-                scene_center = subsampled_tracks_3d.mean((0, 1))
-                tracks_3d_centered = subsampled_tracks_3d - scene_center
-                min_scale = tracks_3d_centered.quantile(0.05, dim=0)
-                max_scale = tracks_3d_centered.quantile(0.95, dim=0)
-                scale = torch.max(max_scale - min_scale).item() / 2.0
-                original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1)
-                target_up = original_up.new_tensor([0.0, 0.0, 1.0])
-                R = roma.rotvec_to_rotmat(
-                    F.normalize(original_up.cross(target_up, dim=-1), dim=-1)
-                    * original_up.dot(target_up).acos_()
-                )
-                transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center))
-                self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm)
-                torch.save(self.scene_norm_dict, cached_scene_norm_dict_path)
-            else:
-                raise ValueError("scene_norm_dict must be provided for validation.")
-
-        # Normalize the scene.
-        scale = self.scene_norm_dict["scale"]
-        transfm = self.scene_norm_dict["transfm"]
-        self.w2cs = self.w2cs @ torch.linalg.inv(transfm)
-        self.w2cs[:, :3, 3] /= scale
-        if self.training and not skip_load_imgs:
-            self.depths /= scale
-
-        if not skip_load_imgs:
-            guru.info(
-                f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}"
-            )
-
-    @property
-    def num_frames(self) -> int:
-        return len(self.frame_names)
-
-    def __len__(self):
-        return self.imgs.shape[0]
-
-    def get_w2cs(self) -> torch.Tensor:
-        return self.w2cs
-
-    def get_Ks(self) -> torch.Tensor:
-        return self.Ks
-
-    def get_image(self, index: int) -> torch.Tensor:
-        return self.imgs[index]
-
-    def get_depth(self, index: int) -> torch.Tensor:
-        return self.depths[index]
-
-    def get_masks(self, index: int) -> torch.Tensor:
-        return self.masks[index]
-
-    def get_img_wh(self) -> tuple[int, int]:
-        return iio.imread(
-            osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png")
-        ).shape[1::-1]
-
-    # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]:
-    #     return self.sam_features, self.sam_original_size, self.sam_input_size
-
-    def get_tracks_3d(
-        self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs
-    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
-        """Get 3D tracks from the dataset.
-
-        Args:
-            num_samples (int | None): The number of samples to fetch. If None,
-                fetch all samples. If not None, fetch roughly a same number of
-                samples across each frame. Note that this might result in
-                number of samples less than what is specified.
-            step (int): The step to temporally subsample the track.
-        """
-        assert (
-            self.split == "train"
-        ), "fetch_tracks_3d is only available for the training split."
-        cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth")
-        if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache:
-            print("loading cached 3d tracks data...")
-            start, end = self.start, self.end
-            cached_track_3d_data = torch.load(cached_track_3d_path)
-            tracks_3d, visibles, invisibles, confidences, track_colors = (
-                cached_track_3d_data["tracks_3d"][:, start:end],
-                cached_track_3d_data["visibles"][:, start:end],
-                cached_track_3d_data["invisibles"][:, start:end],
-                cached_track_3d_data["confidences"][:, start:end],
-                cached_track_3d_data["track_colors"],
-            )
-            return tracks_3d, visibles, invisibles, confidences, track_colors
-
-        # Load 2D tracks.
-        raw_tracks_2d = []
-        candidate_frames = list(range(0, self.num_frames, step))
-        num_sampled_frames = len(candidate_frames)
-        for i in (
-            tqdm(candidate_frames, desc="Loading 2D tracks", leave=False)
-            if show_pbar
-            else candidate_frames
-        ):
-            curr_num_samples = self.query_tracks_2d[i].shape[0]
-            num_samples_per_frame = (
-                int(np.floor(num_samples / num_sampled_frames))
-                if i != candidate_frames[-1]
-                else num_samples
-                - (num_sampled_frames - 1)
-                * int(np.floor(num_samples / num_sampled_frames))
-            )
-            if num_samples_per_frame < curr_num_samples:
-                track_sels = np.random.choice(
-                    curr_num_samples, (num_samples_per_frame,), replace=False
-                )
-            else:
-                track_sels = np.arange(0, curr_num_samples)
-            curr_tracks_2d = []
-            for j in range(0, self.num_frames, step):
-                if i == j:
-                    target_tracks_2d = self.query_tracks_2d[i]
-                else:
-                    target_tracks_2d = torch.from_numpy(
-                        np.load(
-                            osp.join(
-                                self.data_dir,
-                                "flow3d_preprocessed/2d_tracks/",
-                                f"{self.factor}x/"
-                                f"{self.frame_names[i]}_"
-                                f"{self.frame_names[j]}.npy",
-                            )
-                        ).astype(np.float32)
-                    )
-                curr_tracks_2d.append(target_tracks_2d[track_sels])
-            raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1))
-        guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}")
-
-        # Process 3D tracks.
-        inv_Ks = torch.linalg.inv(self.Ks)[::step]
-        c2ws = torch.linalg.inv(self.w2cs)[::step]
-        H, W = self.imgs.shape[1:3]
-        filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], []
-        filtered_invisibles, filtered_confidences = [], []
-        masks = self.masks * self.valid_masks * (self.depths > 0)
-        masks = (masks > 0.5).float()
-        for i, tracks_2d in enumerate(raw_tracks_2d):
-            tracks_2d = tracks_2d.swapdims(0, 1)
-            tracks_2d, occs, dists = (
-                tracks_2d[..., :2],
-                tracks_2d[..., 2],
-                tracks_2d[..., 3],
-            )
-            # visibles = postprocess_occlusions(occs, dists)
-            visibles, invisibles, confidences = parse_tapir_track_info(occs, dists)
-            # Unproject 2D tracks to 3D.
-            track_depths = F.grid_sample(
-                self.depths[::step, None],
-                normalize_coords(tracks_2d[..., None, :], H, W),
-                align_corners=True,
-                padding_mode="border",
-            )[:, 0]
-            tracks_3d = (
-                torch.einsum(
-                    "nij,npj->npi",
-                    inv_Ks,
-                    F.pad(tracks_2d, (0, 1), value=1.0),
-                )
-                * track_depths
-            )
-            tracks_3d = torch.einsum(
-                "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0)
-            )[..., :3]
-            # Filter out out-of-mask tracks.
-            is_in_masks = (
-                F.grid_sample(
-                    masks[::step, None],
-                    normalize_coords(tracks_2d[..., None, :], H, W),
-                    align_corners=True,
-                ).squeeze()
-                == 1
-            )
-            visibles *= is_in_masks
-            invisibles *= is_in_masks
-            confidences *= is_in_masks.float()
-            # Get track's color from the query frame.
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
-            self.val_dataset = TensorDataset(torch.zeros(0))  # type: ignore
-        self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset)
-        self.video_dataset = self.train_dataset.get_video_dataset()
-        guru.success("Loading finished!")
-
-    def train_dataloader(self) -> DataLoader:
-        return DataLoader(
-            self.train_dataset,
-            batch_size=self.batch_size,
-            num_workers=self.num_workers,
-            collate_fn=iPhoneDataset.train_collate_fn,
-        )
-
-    def val_dataloader(self) -> list[DataLoader]:
-        return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)]
-"""
diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/data/utils.py b/som_out/bear/code/2024-10-25-234902/flow3d/data/utils.py
deleted file mode 100644
index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000
--- a/som_out/bear/code/2024-10-25-234902/flow3d/data/utils.py
+++ /dev/null
@@ -1,360 +0,0 @@
-from typing import List, Optional, Tuple, TypedDict
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch.nn.modules.utils import _pair, _quadruple
-
-UINT16_MAX = 65535
-
-
-class SceneNormDict(TypedDict):
-    scale: float
-    transfm: torch.Tensor
-
-
-def to_device(batch, device):
-    if isinstance(batch, dict):
-        return {k: to_device(v, device) for k, v in batch.items()}
-    if isinstance(batch, (list, tuple)):
-        return [to_device(v, device) for v in batch]
-    if isinstance(batch, torch.Tensor):
-        return batch.to(device)
-    return batch
-
-
-def normalize_coords(coords, h, w):
-    assert coords.shape[-1] == 2
-    return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0
-
-
-def postprocess_occlusions(occlusions, expected_dist):
-    """Postprocess occlusions to boolean visible flag.
-
-    Args:
-        occlusions: [-inf, inf], np.float32
-        expected_dist: [-inf, inf], np.float32
-
-    Returns:
-        visibles: bool
-    """
-
-    def sigmoid(x):
-        if isinstance(x, np.ndarray):
-            return 1 / (1 + np.exp(-x))
-        else:
-            return torch.sigmoid(x)
-
-    visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5
-    return visibles
-
-
-def parse_tapir_track_info(occlusions, expected_dist):
-    """
-    return:
-        valid_visible: mask of visible & confident points
-        valid_invisible: mask of invisible & confident points
-        confidence: clamped confidence scores (all < 0.5 -> 0)
-    """
-    visibility = 1 - F.sigmoid(occlusions)
-    confidence = 1 - F.sigmoid(expected_dist)
-    valid_visible = visibility * confidence > 0.5
-    valid_invisible = (1 - visibility) * confidence > 0.5
-    # set all confidence < 0.5 to 0
-    confidence = confidence * (valid_visible | valid_invisible).float()
-    return valid_visible, valid_invisible, confidence
-
-
-def get_tracks_3d_for_query_frame(
-    query_index: int,
-    query_img: torch.Tensor,
-    tracks_2d: torch.Tensor,
-    depths: torch.Tensor,
-    masks: torch.Tensor,
-    inv_Ks: torch.Tensor,
-    c2ws: torch.Tensor,
-):
-    """
-    :param query_index (int)
-    :param query_img [H, W, 3]
-    :param tracks_2d [N, T, 4]
-    :param depths [T, H, W]
-    :param masks [T, H, W]
-    :param inv_Ks [T, 3, 3]
-    :param c2ws [T, 4, 4]
-    returns (
-        tracks_3d [N, T, 3]
-        track_colors [N, 3]
-        visibles [N, T]
-        invisibles [N, T]
-        confidences [N, T]
-    )
-    """
-    T, H, W = depths.shape
-    query_img = query_img[None].permute(0, 3, 1, 2)  # (1, 3, H, W)
-    tracks_2d = tracks_2d.swapaxes(0, 1)  # (T, N, 4)
-    tracks_2d, occs, dists = (
-        tracks_2d[..., :2],
-        tracks_2d[..., 2],
-        tracks_2d[..., 3],
-    )
-    # visibles = postprocess_occlusions(occs, dists)
-    # (T, N), (T, N), (T, N)
-    visibles,
invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - track_depths = F.grid_sample( - depths[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths[..., None] - ) - tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[ - ..., :3 - ] - # Filter out out-of-mask tracks. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - is_in_masks = ( - F.grid_sample( - masks[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - )[:, 0, 0] - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - - # valid if in the fg mask at least 40% of the time - # in_mask_counts = is_in_masks.sum(0) - # t = 0.25 - # thresh = min(t * T, in_mask_counts.float().quantile(t).item()) - # valid = in_mask_counts > thresh - valid = is_in_masks[query_index] - # valid if visible 5% of the time - visible_counts = visibles.sum(0) - valid = valid & ( - visible_counts - >= min( - int(0.05 * T), - visible_counts.float().quantile(0.1).item(), - ) - ) - - # Get track's color from the query frame. - # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. 
Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. - If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. 
-def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] - ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/init_utils.py b/som_out/bear/code/2024-10-25-234902/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from 
flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes
-from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks
-
-
-def init_fg_from_tracks_3d(
-    cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor
-) -> GaussianParams:
-    """
-    using dataclasses instead of individual tensors so we know they're consistent
-    and are always masked/filtered together
-    """
-    num_fg = tracks_3d.xyz.shape[0]
-
-    # Initialize gaussian colors.
-    colors = torch.logit(tracks_3d.colors)
-    # Initialize gaussian scales: find the average of the three nearest
-    # neighbors in the canonical frame for each point and use that as the
-    # scale.
-    dists, _ = knn(tracks_3d.xyz[:, cano_t], 3)
-    dists = torch.from_numpy(dists)
-    scales = dists.mean(dim=-1, keepdim=True)
-    scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95))
-    scales = torch.log(scales.repeat(1, 3))
-    # Initialize gaussian means.
-    means = tracks_3d.xyz[:, cano_t]
-    # Initialize gaussian orientations as random.
-    quats = torch.rand(num_fg, 4)
-    # Initialize gaussian opacities.
-    opacities = torch.logit(torch.full((num_fg,), 0.7))
-    gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs)
-    return gaussians
-
-
-def init_bg(
-    points: StaticObservations,
-) -> GaussianParams:
-    """
-    using dataclasses instead of individual tensors so we know they're consistent
-    and are always masked/filtered together
-    """
-    num_init_bg_gaussians = points.xyz.shape[0]
-    bg_scene_center = points.xyz.mean(0)
-    bg_points_centered = points.xyz - bg_scene_center
-    bg_min_scale = bg_points_centered.quantile(0.05, dim=0)
-    bg_max_scale = bg_points_centered.quantile(0.95, dim=0)
-    bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0
-    bkdg_colors = torch.logit(points.colors)
-
-    # Initialize gaussian scales: find the average of the three nearest
-    # neighbors for each point and use that as the scale.
-    dists, _ = knn(points.xyz, 3)
-    dists = torch.from_numpy(dists)
-    bg_scales = dists.mean(dim=-1, keepdim=True)
-    bkdg_scales = torch.log(bg_scales.repeat(1, 3))
-
-    bg_means = points.xyz
-
-    # Initialize gaussian orientations by normals.
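The step below encodes each background orientation as the rotation taking the local +z axis onto the estimated surface normal: the axis comes from a cross product, the angle from the arccos of the dot product (the code expresses this as a rotation vector for roma). An equivalent quaternion construction, assuming unit-length normals and ignoring the degenerate normal = -z case; quat_z_to_normal is illustrative:

import torch
import torch.nn.functional as F

def quat_z_to_normal(normal: torch.Tensor) -> torch.Tensor:
    # normal: (N, 3) unit vectors; returns (N, 4) wxyz quaternions.
    z = torch.zeros_like(normal)
    z[:, 2] = 1.0
    axis = F.normalize(torch.linalg.cross(z, normal), dim=-1)
    angle = torch.acos((z * normal).sum(-1, keepdim=True).clamp(-1.0, 1.0))
    return torch.cat([torch.cos(angle / 2), axis * torch.sin(angle / 2)], dim=-1)

q = quat_z_to_normal(F.normalize(torch.tensor([[1.0, 0.0, 1.0]]), dim=-1))
print(q)  # ~[[0.924, 0., 0.383, 0.]]: a 45-degree rotation about +y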
- local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t - 
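solve_procrustes (imported from flow3d.transforms) fits a rigid SE(3) transform between the canonical and target point sets under these weights. For orientation, a textbook weighted Kabsch solve looks roughly as follows; this is a sketch of the standard algorithm, not the repo's implementation:

import math

import torch

def weighted_kabsch(src: torch.Tensor, dst: torch.Tensor, w: torch.Tensor):
    # src, dst: (N, 3); w: (N,) non-negative weights. Returns R (3, 3), t (3,).
    w = w / w.sum().clamp(min=1e-8)
    mu_s = (w[:, None] * src).sum(0)
    mu_d = (w[:, None] * dst).sum(0)
    cov = (w[:, None] * (src - mu_s)).T @ (dst - mu_d)
    U, _, Vh = torch.linalg.svd(cov)
    d = torch.det(Vh.T @ U.T)
    D = torch.diag(torch.stack([torch.ones_like(d), torch.ones_like(d), d]))
    R = Vh.T @ D @ U.T  # proper rotation, det(R) = +1
    t = mu_d - R @ mu_s
    return R, t

# Sanity check: recover a known rotation about z plus a translation.
c, s = math.cos(0.3), math.sin(0.3)
Rz = torch.tensor([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
src = torch.randn(50, 3)
dst = src @ Rz.T + torch.tensor([0.1, -0.2, 0.3])
R, t = weighted_kabsch(src, dst, torch.ones(50))
print(torch.allclose(R, Rz, atol=1e-4))  # True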
procrustes_weights = (
-                weights[cano_t]
-                * weights[cur_t]
-                * (confidences[cano_t] + confidences[cur_t])
-                / 2
-            )
-            if procrustes_weights.sum() < min_mean_weight * num_frames:
-                init_rots[n, cur_t] = init_rots[n, prev_t]
-                init_ts[n, cur_t] = init_ts[n, prev_t]
-                cluster_skip_ts.append(cur_t)
-            else:
-                se3, (err, err_before) = solve_procrustes(
-                    cluster[cano_t],
-                    cluster[cur_t],
-                    weights=procrustes_weights,
-                    enforce_se3=True,
-                    rot_type=rot_type,
-                )
-                init_rot, init_t, _ = se3
-                assert init_rot.shape[-1] == rot_dim
-                # double cover
-                if rot_type == "quat" and torch.linalg.norm(
-                    init_rot - init_rots[n][prev_t]
-                ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]):
-                    init_rot = -init_rot
-                init_rots[n, cur_t] = init_rot
-                init_ts[n, cur_t] = init_t
-                if np.isnan(err):
-                    print(f"{cur_t=} {err=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                if np.isnan(err_before):
-                    print(f"{cur_t=} {err_before=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                errs_after[n, cur_t] = err
-                errs_before[n, cur_t] = err_before
-            prev_t = cur_t
-        skipped_ts[cluster_id.item()] = cluster_skip_ts
-
-    guru.info(f"{skipped_ts=}")
-    guru.info(
-        "procrustes init median error: {:.5f} => {:.5f}".format(
-            np.median(errs_before[errs_before > 0]),
-            np.median(errs_after[errs_after > 0]),
-        )
-    )
-    guru.info(
-        "procrustes init mean error: {:.5f} => {:.5f}".format(
-            np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0])
-        )
-    )
-    guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}")
-
-    if vis:
-        server = get_server(port)
-        center_idcs = torch.argmin(dists2centers, dim=0)
-        print(f"{dists2centers.shape=} {center_idcs.shape=}")
-        vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs])
-        vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks")
-        import ipdb
-
-        ipdb.set_trace()
-
-    bases = MotionBases(init_rots, init_ts)
-    return bases, motion_coefs, tracks_3d
-
-
-def run_initial_optim(
-    fg: GaussianParams,
-    bases: MotionBases,
-    tracks_3d: TrackObservations,
-    Ks: torch.Tensor,
-    w2cs: torch.Tensor,
-    num_iters: int = 1000,
-    use_depth_range_loss: bool = False,
-):
-    """
-    :param motion_rots: [num_bases, num_frames, 4|6]
-    :param motion_transls: [num_bases, num_frames, 3]
-    :param motion_coefs: [num_bases, num_frames]
-    :param means: [num_gaussians, 3]
-    """
-    optimizer = torch.optim.Adam(
-        [
-            {"params": bases.params["rots"], "lr": 1e-2},
-            {"params": bases.params["transls"], "lr": 3e-2},
-            {"params": fg.params["motion_coefs"], "lr": 1e-2},
-            {"params": fg.params["means"], "lr": 1e-3},
-        ],
-    )
-    scheduler = torch.optim.lr_scheduler.ExponentialLR(
-        optimizer, gamma=0.1 ** (1 / num_iters)
-    )
-    G = fg.params.means.shape[0]
-    num_frames = bases.num_frames
-    device = bases.params["rots"].device
-
-    w_smooth_func = lambda i, min_v, max_v, th: (
-        min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v
-    )
-
-    gt_2d, gt_depth = project_2d_tracks(
-        tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True
-    )
-    # (G, T, 2)
-    gt_2d = gt_2d.swapaxes(0, 1)
-    # (G, T)
-    gt_depth = gt_depth.swapaxes(0, 1)
-
-    ts = torch.arange(0, num_frames, device=device)
-    ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2)
-    ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1))  # (3B,)
-
-    pbar = tqdm(range(0, num_iters))
-    for i in pbar:
-        coefs = fg.get_coefs()
-        transfms = bases.compute_transforms(ts, coefs)
-        positions = torch.einsum(
-            "pnij,pj->pni",
-            transfms,
-            F.pad(fg.params["means"], (0,
1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. - w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = 
init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / 
(cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/loss_utils.py b/som_out/bear/code/2024-10-25-234902/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return 
torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - 
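For context on the acceleration terms used in this file (here projected onto the viewing ray, and unprojected in compute_accel_loss below): the discrete acceleration of a trajectory is its second difference, 2*x_t - x_{t-1} - x_{t+1}, which is constant for a parabola. A quick numeric check:

import torch

x = torch.tensor([[0.0], [1.0], [4.0], [9.0]])  # x_t = t**2 sampled over time
accel = 2 * x[1:-1] - x[:-2] - x[2:]            # second difference per interior step
print(accel.squeeze())                           # tensor([-2., -2.]): constant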
# acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/metrics.py b/som_out/bear/code/2024-10-25-234902/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/metrics.py +++ /dev/null @@ -1,313 +0,0 @@ -from typing import Literal - -import numpy as np -import torch -import torch.nn.functional as F -from torchmetrics.functional.image.lpips import _NoTrainLpips -from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure -from torchmetrics.metric import Metric -from torchmetrics.utilities import dim_zero_cat -from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE - - -def compute_psnr( - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, -) -> float: - """ - Args: - preds (torch.Tensor): (..., 3) predicted images in [0, 1]. - targets (torch.Tensor): (..., 3) target images in [0, 1]. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - - Returns: - psnr (float): Peak signal-to-noise ratio. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - return ( - -10.0 - * torch.log( - F.mse_loss( - preds * masks[..., None], - targets * masks[..., None], - reduction="sum", - ) - / masks.sum().clamp(min=1.0) - / 3.0 - ) - / np.log(10.0) - ).item() - - -def compute_pose_errors( - preds: torch.Tensor, targets: torch.Tensor -) -> tuple[float, float, float]: - """ - Args: - preds: (N, 4, 4) predicted camera poses. - targets: (N, 4, 4) target camera poses. - - Returns: - ate (float): Absolute trajectory error. - rpe_t (float): Relative pose error in translation. - rpe_r (float): Relative pose error in rotation (degree). - """ - # Compute ATE. - ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item() - # Compute RPE_t and RPE_r. - # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r. - # torch has numerical issues for acos when the value is close to 1.0, i.e. - # RPE_r is supposed to be very small, and will result in artificially large - # error. 
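The relative rotation error computed below uses the trace identity cos(theta) = (tr(R) - 1) / 2; the clip guards against arguments pushed just outside [-1, 1] by floating-point noise, which is exactly where acos is ill-conditioned per the note above. A standalone check of that identity:

import numpy as np

def rot_angle_deg(R: np.ndarray) -> float:
    cos = np.clip((np.trace(R) - 1.0) / 2.0, -1.0, 1.0)
    return float(np.degrees(np.arccos(cos)))

theta = np.radians(30.0)
Rz = np.array([
    [np.cos(theta), -np.sin(theta), 0.0],
    [np.sin(theta), np.cos(theta), 0.0],
    [0.0, 0.0, 1.0],
])
print(rot_angle_deg(Rz))  # ~30.0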
-    preds = preds.detach().cpu().numpy()
-    targets = targets.detach().cpu().numpy()
-    pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:]
-    target_rels = np.linalg.inv(targets[:-1]) @ targets[1:]
-    error_rels = np.linalg.inv(target_rels) @ pred_rels
-    traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1)
-    rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item()
-    rpe_r = (
-        np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item()
-        / np.pi
-        * 180.0
-    )
-    return ate, rpe_t, rpe_r
-
-
-class mPSNR(PeakSignalNoiseRatio):
-    sum_squared_error: list[torch.Tensor]
-    total: list[torch.Tensor]
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            data_range=1.0,
-            base=10.0,
-            dim=None,
-            reduction="elementwise_mean",
-            **kwargs,
-        )
-        self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat")
-        self.add_state("total", default=[], dist_reduce_fx="cat")
-
-    def __len__(self) -> int:
-        return len(self.total)
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (..., 3) float32 predicted images.
-            targets (torch.Tensor): (..., 3) float32 target images.
-            masks (torch.Tensor | None): (...,) optional binary masks where the
-                1-regions will be taken into account.
-        """
-        if masks is None:
-            masks = torch.ones_like(preds[..., 0])
-        self.sum_squared_error.append(
-            torch.sum(torch.pow((preds - targets) * masks[..., None], 2))
-        )
-        self.total.append(masks.sum().to(torch.int64) * 3)
-
-    def compute(self) -> torch.Tensor:
-        """Compute peak signal-to-noise ratio over state."""
-        sum_squared_error = dim_zero_cat(self.sum_squared_error)
-        total = dim_zero_cat(self.total)
-        return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0)
-
-
-class mSSIM(StructuralSimilarityIndexMeasure):
-    similarity: list
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            reduction=None,
-            data_range=1.0,
-            return_full_image=False,
-            **kwargs,
-        )
-        assert isinstance(self.sigma, float)
-
-    def __len__(self) -> int:
-        return sum([s.shape[0] for s in self.similarity])
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (B, H, W, 3) float32 predicted images.
-            targets (torch.Tensor): (B, H, W, 3) float32 target images.
-            masks (torch.Tensor | None): (B, H, W) optional binary masks where
-                the 1-regions will be taken into account.
-        """
-        if masks is None:
-            masks = torch.ones_like(preds[..., 0])
-
-        # Construct a 1D Gaussian blur filter.
-        assert isinstance(self.kernel_size, int)
-        hw = self.kernel_size // 2
-        shift = (2 * hw - self.kernel_size + 1) / 2
-        assert isinstance(self.sigma, float)
-        f_i = (
-            (torch.arange(self.kernel_size, device=preds.device) - hw + shift)
-            / self.sigma
-        ) ** 2
-        filt = torch.exp(-0.5 * f_i)
-        filt /= torch.sum(filt)
-
-        # Blur in x and y (faster than the 2D convolution).
-        def convolve2d(z, m, f):
-            # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf).
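convolve2d below blurs both the masked image and the mask, then renormalizes the image response by the mask response, so invalid pixels neither leak values in nor bias the result toward zero. The same trick in one dimension with a box filter (toy data):

import torch
import torch.nn.functional as F

x = torch.tensor([[[1.0, 1.0, 0.0, 1.0, 1.0]]])  # (B, C, L); the 0 is a hole
m = torch.tensor([[[1.0, 1.0, 0.0, 1.0, 1.0]]])  # mask: 0 marks invalid samples
box = torch.ones(1, 1, 3) / 3.0

num = F.conv1d(x * m, box, padding=1)
den = F.conv1d(m, box, padding=1)
blurred = torch.where(den > 0, num / den, torch.zeros_like(num))
print(blurred)  # all ones: a naive blur would dip near the hole instead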
- z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. - # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. 
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. - """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/params.py b/som_out/bear/code/2024-10-25-234902/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if 
f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] 
== 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/renderer.py b/som_out/bear/code/2024-10-25-234902/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. - work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/scene_model.py b/som_out/bear/code/2024-10-25-234902/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def 
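# Minimal sketch (not from the original file) of the track projection used by
# Renderer.render_fn above: world-space 3D track points pass through the camera
# extrinsics and intrinsics in one einsum, then a perspective divide. Shapes
# follow the (N tracks, B frames) layout used there; K and w2c are example values.
import torch
import torch.nn.functional as F

N, B = 4, 3
K = torch.tensor([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
w2c = torch.eye(4)  # camera at the world origin, looking down +z
tracks_3d = torch.randn(N, B, 3) + torch.tensor([0.0, 0.0, 5.0])  # in front of camera

pts_h = F.pad(tracks_3d, (0, 1), value=1.0)                     # (N, B, 4) homogeneous
tracks_2d = torch.einsum("ij,jk,nbk->nbi", K, w2c[:3], pts_h)   # (N, B, 3)
tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:]             # pixel coordinates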
__init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = 
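# Minimal sketch (not from the original file) of how compute_poses_fg above moves
# each Gaussian: a per-Gaussian, per-frame (3, 4) transform acts on the
# homogeneous canonical mean. The quaternion composition via roma is omitted here
# to keep the sketch dependency-free; identity rotations are assumed.
import torch
import torch.nn.functional as F

G, B = 6, 2
transfms = torch.cat(
    [torch.eye(3).expand(G, B, 3, 3), torch.randn(G, B, 3, 1)], dim=-1
)  # (G, B, 3, 4): identity rotation plus a random per-frame translation
means = torch.randn(G, 3)  # canonical Gaussian means

posed = torch.einsum("pnij,pj->pni", transfms, F.pad(means, (0, 1), value=1.0))
assert posed.shape == (G, B, 3)
# With identity rotations this reduces to a pure translation per frame:
assert torch.allclose(posed, means[:, None] + transfms[..., 3])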
GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). - target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = 
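# Minimal sketch (not from the original file) of the channel-packing scheme used
# by SceneModel.render above: extra per-Gaussian quantities (a foreground mask
# bit, flattened 3D track targets) are appended to the color tensor, rasterized
# in one pass, and split back out using the `ds_expected` bookkeeping dict.
import torch

C, H, W, B = 1, 4, 4, 2
ds_expected = {"img": 3, "mask": 1, "tracks_3d": B * 3}
render_colors = torch.randn(C, H, W, sum(ds_expected.values()))  # stand-in raster output

outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1)
out = dict(zip(ds_expected.keys(), outputs))
out["tracks_3d"] = out["tracks_3d"].reshape(C, H, W, B, 3)
assert out["img"].shape == (C, H, W, 3) and out["mask"].shape == (C, H, W, 1)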
colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. - self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/tensor_dataclass.py b/som_out/bear/code/2024-10-25-234902/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
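# Minimal sketch (not from the original file) of what the map/to machinery above
# buys in practice: a TensorDataclass subclass gets indexing, device moves, and
# arbitrary per-tensor transforms without per-field boilerplate. This reuses the
# TensorDataclass base defined in the surrounding code.
from dataclasses import dataclass
import torch

@dataclass
class Points(TensorDataclass):
    xyz: torch.Tensor
    colors: torch.Tensor

pts = Points(xyz=torch.randn(100, 3), colors=torch.rand(100, 3))
subset = pts[:10]                    # __getitem__ maps x -> x[:10] over all fields
halved = pts.map(lambda x: x * 0.5)  # arbitrary elementwise transform
# pts.to("cuda") would move every field and return a new dataclass.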
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/trainer.py b/som_out/bear/code/2024-10-25-234902/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
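# Minimal sketch (not from the original file) of the pinhole intrinsics built in
# render_fn above: the viewer supplies a vertical field of view, the focal length
# in pixels follows from H = 2 * f * tan(fov / 2), and the principal point sits
# at the image center. The FOV value below is an assumed example.
import numpy as np
import torch

W, H = 640, 480
fov = np.deg2rad(60.0)  # vertical field of view
focal = 0.5 * H / np.tan(0.5 * fov)
K = torch.tensor([[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]])
# focal is about 415.7 pixels for a 480-pixel-tall image at 60 degrees vertical FOV.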
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
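# Minimal sketch (not from the original file) of the shape idiom at the top of
# compute_losses above: images are stored (B, H, W, 3), and the reversed slice
# shape[2:0:-1] reads the spatial dims back-to-front, yielding (W, H) in the
# order the rendering code expects.
import torch

imgs = torch.zeros(8, 480, 854, 3)  # (B, H, W, 3)
W, H = img_wh = imgs.shape[2:0:-1]
assert (W, H) == (854, 480)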
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
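# Minimal sketch (not from the original file) of the track-loss weighting used
# above: supervision from temporally distant target frames is downweighted
# exponentially, then multiplied by the per-point tracker confidence. Values are
# example inputs.
import torch

num_frames = 80
ts = torch.tensor([10, 10, 10])           # source frames (repeated per target)
target_ts = torch.tensor([11, 30, 70])    # matched target frames
confidences = torch.tensor([0.9, 0.8, 0.95])

frame_intervals = (ts - target_ts).abs()
w_interval = torch.exp(-2 * frame_intervals / num_frames)
track_weights = confidences * w_interval
# Nearby pairs keep nearly full weight; a 60-frame gap decays to exp(-1.5) ~ 0.22.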
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
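# Minimal sketch (not from the original file) of a plausible form of the masked,
# quantile-trimmed L1 used for the depth terms above. The real masked_l1_loss
# lives in flow3d.loss_utils, which is not shown in this diff, so details may
# differ; supervising inverse depth (disparity) de-emphasizes far-away errors.
import torch

def masked_l1_loss(pred, gt, mask=None, quantile: float = 1.0):
    err = (pred - gt).abs()
    if mask is not None:
        err = err * mask
    if quantile < 1.0:
        # Trim the largest residuals so depth outliers do not dominate the loss.
        err = err[err <= torch.quantile(err, quantile)]
    return err.mean()

pred_depth = torch.rand(2, 48, 64, 1) + 0.5
gt_depth = torch.rand(2, 48, 64, 1) + 0.5
masks = (torch.rand(2, 48, 64, 1) > 0.2).float()
loss = masked_l1_loss(1.0 / (pred_depth + 1e-5), 1.0 / (gt_depth + 1e-5), masks, quantile=0.98)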
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
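# Minimal sketch (not from the original file) of the control cadence implied by
# run_control_steps above, written out for assumed example settings: densify and
# cull run every `control_every` steps inside the warmup/stop window; opacities
# reset every `reset_opacity_every` steps, and densify/cull are skipped for a
# short phase right after each reset (the `> num_frames` guards).
control_every, warmup_steps, stop_control_steps = 100, 200, 4000
reset_opacity_every = 30 * control_every
num_frames = 80

schedule = []
for step in range(0, stop_control_steps + 1, control_every):
    if not (warmup_steps < step < stop_control_steps):
        continue
    phase = step % reset_opacity_every
    schedule.append((step, phase > num_frames, phase > min(3 * num_frames, 1000), phase == 0))
# Each entry reads (step, densify?, cull?, reset_opacity?); e.g. at step 3000 the
# opacities reset and densification pauses until the phase counter recovers.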
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/trajectories.py b/som_out/bear/code/2024-10-25-234902/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1) - y = torch.cross(z, x, dim=-1) # (3) - avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center) - avg_w2c = torch.linalg.inv(avg_c2w) - return avg_w2c - - -def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor: - """Triangulate a set of rays to find a single lookat point. - - Args: - origins (torch.Tensor): A (N, 3) array of ray origins. - viewdirs (torch.Tensor): A (N, 3) array of ray view directions. - - Returns: - torch.Tensor: A (3,) lookat point. - """ - - viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1) - eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None] - # Calculate projection matrix I - rr^T - I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :]) - # Compute sum of projections - sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3) - # Solve for the intersection point using least squares - lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0] - # Check NaNs. - assert not torch.any(torch.isnan(lookat)) - return lookat - - -def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor): - """ - Args: - positions: (N, 3) tensor of camera positions - lookat: (3,) tensor of lookat point - up: (3,) tensor of up vector - - Returns: - w2cs: (N, 3, 3) tensor of world to camera rotation matrices - """ - forward_vectors = F.normalize(lookat - positions, dim=-1) - right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1) - down_vectors = F.normalize( - torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1 - ) - Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1) - w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions)) - return w2cs - - -def get_arc_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_position = torch.linalg.inv(ref_w2c)[:3, 3] - thetas = ( - torch.sin( - torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[ - :-1 - ] - ) - * (degree / 2.0) - / 180.0 - * torch.pi - ) - positions = torch.einsum( - "nij,j->ni", - roma.rotvec_to_rotmat(thetas[:, None] * up[None]), - ref_position - lookat, - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_lemniscate_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi) - # Lemniscate curve in camera space. Starting at the origin. - thetas = ( - torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1] - + torch.pi / 2 - ) - positions = torch.stack( - [ - a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2), - a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2), - torch.zeros(num_frames, device=ref_w2c.device), - ], - dim=-1, - ) - # Transform to world space. 
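# Minimal sketch (not from the original file) driving get_lookat_w2cs above with
# a simple circular orbit: camera positions ring the look-at point, and the
# helper builds the world-to-camera matrices from positions, target, and an
# assumed up vector. Note the returned matrices are (N, 4, 4) homogeneous poses.
import torch

num_frames, radius = 60, 3.0
thetas = torch.linspace(0, 2 * torch.pi, num_frames + 1)[:-1]
positions = torch.stack(
    [radius * torch.cos(thetas), torch.zeros(num_frames), radius * torch.sin(thetas)],
    dim=-1,
)
lookat = torch.zeros(3)
up = torch.tensor([0.0, -1.0, 0.0])  # y-down convention, matching the code above
w2cs = get_lookat_w2cs(positions, lookat, up)  # (num_frames, 4, 4)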
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/transforms.py b/som_out/bear/code/2024-10-25-234902/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
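# Minimal sketch (not from the original file) of rt_to_mat4, defined here, in
# action: a stack of rotations and translations becomes homogeneous 4x4 matrices.
# With a scale s the bottom row becomes (0, 0, 0, 1/s), which folds a uniform
# scale into the same matrix after the homogeneous divide.
import torch

R = torch.eye(3).expand(5, 3, 3)
t = torch.randn(5, 3)
mat4 = rt_to_mat4(R, t)  # (5, 4, 4) with bottom row (0, 0, 0, 1)
assert torch.allclose(mat4[:, 3], torch.tensor([0.0, 0.0, 0.0, 1.0]).expand(5, 4))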
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transfm to be SE3. - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - q (torch.Tensor): (4,), rotation component in quaternion of WXYZ - format. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (torch.Tensor): (), average L2 distance after alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error. 
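# Minimal sketch (not from the original file) exercising solve_procrustes above
# on a synthetic similarity transform: the recovered (rot, t, s) should map `src`
# onto `dst` up to numerical precision, since dst is built from the exact model
# min || s * (src @ R.T + t) - dst ||. rot_type="mat" returns the matrix directly.
import torch

src = torch.randn(500, 3)
R_true = torch.linalg.qr(torch.randn(3, 3)).Q
if torch.det(R_true) < 0:  # flip a column to get a proper rotation
    R_true[:, 0] *= -1
s_true, t_true = 2.0, torch.tensor([0.3, -1.0, 0.5])
dst = s_true * (src @ R_true.T + t_true)

(R, t, s), (err, err_before) = solve_procrustes(src, dst, rot_type="mat")
aligned = s * (src @ R.T + t)
assert torch.allclose(aligned, dst, atol=1e-3)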
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/validator.py b/som_out/bear/code/2024-10-25-234902/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
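# Minimal sketch (not from the original file) of the keypoint lookup pattern used
# in validate_keypoints above: per-pixel track predictions form an (H, W, C) map,
# and keypoint pixel coordinates are normalized to [-1, 1] and sampled
# bilinearly. normalize_coords comes from flow3d.data.utils, which is not shown
# in this diff; the version below is a plausible stand-in.
import torch
import torch.nn.functional as F

def normalize_coords(xy: torch.Tensor, h: int, w: int) -> torch.Tensor:
    # Map pixel coordinates to grid_sample's [-1, 1] range (align_corners=True).
    return torch.stack([2 * xy[..., 0] / (w - 1) - 1, 2 * xy[..., 1] / (h - 1) - 1], dim=-1)

H, W, C = 48, 64, 2
pred_map = torch.randn(H, W, C)                         # e.g. predicted 2D track targets
keypoints = torch.tensor([[10.0, 20.0], [33.0, 7.0]])   # (P, 2) pixel coords, (x, y)

sampled = F.grid_sample(
    pred_map[None].permute(0, 3, 1, 2),              # (1, C, H, W)
    normalize_coords(keypoints, H, W)[None, None],   # (1, 1, P, 2)
    align_corners=True,
).permute(0, 2, 3, 1)[0, 0]                          # (P, C)
assert torch.allclose(sampled[0], pred_map[20, 10], atol=1e-5)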
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
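-            # Frames are written as side-by-side [GT | rendered] panels; the
-            # depth range (min to 99th-percentile max) is tracked across the
-            # whole sequence so the depth colormap stays consistent.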
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
- t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (3, 3). - img = batch["imgs"][0] - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "motion_coefs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/vis/__init__.py b/som_out/bear/code/2024-10-25-234902/flow3d/vis/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/vis/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc deleted file mode 100644 index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/vis/__pycache__/render_panel.cpython-310.pyc deleted file mode 100644 index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/vis/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/bear/code/2024-10-25-234902/flow3d/vis/__pycache__/viewer.cpython-310.pyc deleted file mode 100644 index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-234902/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/vis/playback_panel.py b/som_out/bear/code/2024-10-25-234902/flow3d/vis/playback_panel.py deleted file mode 100644 index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/vis/playback_panel.py +++ /dev/null @@ -1,68 +0,0 @@ -import threading -import time - -import viser - - -def add_gui_playback_group( - server: viser.ViserServer, - num_frames: int, - min_fps: float = 1.0, - max_fps: float = 60.0, - fps_step: float = 0.1, - initial_fps: float = 10.0, -): - 
gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/vis/render_panel.py b/som_out/bear/code/2024-10-25-234902/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
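-# GUI for composing camera paths: keyframes are captured from the live viser
-# camera, interpolated with Kochanek-Bartels splines, and exported to JSON.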
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
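-        # Keyframes are stored in a dict keyed by a monotonically increasing
-        # counter, so replacing an existing keyframe re-uses its index.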
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
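-                # Interpolating in SE(3) via the log/exp maps yields a smooth
-                # screw-motion flight to the keyframe over ten steps.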
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
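-        # The keyframe captures the client's current pose and, when a timestep
-        # slider exists, the active timestep so playback stays in sync.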
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
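-        # One transform-control gizmo is created per keyframe; dragging it
-        # writes the new pose into both the Keyframe record and its frustum,
-        # then refits the spline.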
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
-        Returns current camera pose + FOV if available."""
-
-        if preview_frame_slider is None:
-            return
-        maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad(
-            preview_frame_slider.value / get_max_frame_index()
-        )
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-        render_tab_state.preview_fov = fov_rad
-        render_tab_state.preview_aspect = camera_path.get_aspect()
-        render_tab_state.preview_camera_type = camera_type.value
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-        return pose, fov_rad, time
-
-    def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]:
-        """Helper for creating the current frame # slider. This is removed and
-        re-added anytime the `max` value changes."""
-
-        with playback_folder:
-            preview_frame_slider = server.gui.add_slider(
-                "Preview frame",
-                min=0,
-                max=get_max_frame_index(),
-                step=1,
-                initial_value=0,
-                # Place right after the pause button.
-                order=preview_render_stop_button.order + 0.01,
-                disabled=get_max_frame_index() == 1,
-            )
-            play_button.disabled = preview_frame_slider.disabled
-            preview_render_button.disabled = preview_frame_slider.disabled
-
-        @preview_frame_slider.on_update
-        def _(_) -> None:
-            nonlocal preview_camera_handle
-            maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-            if maybe_pose_and_fov_rad_and_time is None:
-                return
-            pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-
-            preview_camera_handle = server.scene.add_camera_frustum(
-                "/preview_camera",
-                fov=fov_rad,
-                aspect=resolution.value[0] / resolution.value[1],
-                scale=0.35,
-                wxyz=pose.rotation().wxyz,
-                position=pose.translation(),
-                color=(10, 200, 30),
-            )
-            if render_tab_state.preview_render:
-                for client in server.get_clients().values():
-                    client.camera.wxyz = pose.rotation().wxyz
-                    client.camera.position = pose.translation()
-            if gui_timestep_handle is not None:
-                gui_timestep_handle.value = int(time)
-
-        return preview_frame_slider
-
-    # We back up the camera poses before we start previewing renders, and
-    # restore them afterwards.
-    camera_pose_backup_from_id: Dict[int, tuple] = {}
-
-    @preview_render_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = True
-        preview_render_button.visible = False
-        preview_render_stop_button.visible = True
-
-        maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov, time = maybe_pose_and_fov_rad_and_time
-        del fov
-
-        # Hide all scene nodes when we're previewing the render.
-        server.scene.set_global_visibility(False)
-
-        # Back up and then set camera poses.
-        for client in server.get_clients().values():
-            camera_pose_backup_from_id[client.client_id] = (
-                client.camera.position,
-                client.camera.look_at,
-                client.camera.up_direction,
-            )
-            client.camera.wxyz = pose.rotation().wxyz
-            client.camera.position = pose.translation()
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-
-    @preview_render_stop_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = False
-        preview_render_button.visible = True
-        preview_render_stop_button.visible = False
-
-        # Revert camera poses.
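-        # The pop() also clears the backup entry; flush() pushes the restored
-        # state to the client immediately.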
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera() # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play).start() - - # Play the camera trajectory when the play button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # add button for loading existing path - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - camera_path_dir = datapath.parent - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # load the json file - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # apply the x rotation by 180 deg - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_Fov values will not be exactly matched. 
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/vis/utils.py b/som_out/bear/code/2024-10-25-234902/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, occ, radius=radius) 
- out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/bear/code/2024-10-25-234902/flow3d/vis/viewer.py b/som_out/bear/code/2024-10-25-234902/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/bear/code/2024-10-25-234902/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/bear/code/2024-10-25-234902/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/bear/code/2024-10-25-234902/scripts/evaluate_iphone.py b/som_out/bear/code/2024-10-25-234902/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-234902/scripts/evaluate_iphone.py +++ /dev/null @@ 
-1,447 +0,0 @@
-import argparse
-import json
-import os.path as osp
-from glob import glob
-from itertools import product
-
-import cv2
-import imageio.v3 as iio
-import numpy as np
-import roma
-import torch
-from tqdm import tqdm
-
-from flow3d.data.colmap import get_colmap_camera_params
-from flow3d.metrics import mLPIPS, mPSNR, mSSIM
-from flow3d.transforms import rt_to_mat4, solve_procrustes
-
-parser = argparse.ArgumentParser()
-parser.add_argument(
-    "--data_dir",
-    type=str,
-    help="Path to the data directory that contains all the sequences.",
-)
-parser.add_argument(
-    "--result_dir",
-    type=str,
-    help="Path to the result directory that contains the results. "
-    "For batch evaluation, result_dir should contain subdirectories for each "
-    "sequence (result_dir/seq_name/results); for single-sequence evaluation, "
-    "result_dir should contain results directly (result_dir/results).",
-)
-parser.add_argument(
-    "--seq_names",
-    type=str,
-    nargs="+",
-    default=[
-        "apple",
-        "backpack",
-        "block",
-        "creeper",
-        "handwavy",
-        "haru-sit",
-        "mochi-high-five",
-        "paper-windmill",
-        "pillow",
-        "spin",
-        "sriracha-tree",
-        "teddy",
-    ],
-    help="Sequence names to evaluate.",
-)
-args = parser.parse_args()
-
-
-def load_data_dict(data_dir, train_names, val_names):
-    val_imgs = np.array(
-        [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names]
-    )
-    val_covisibles = np.array(
-        [
-            iio.imread(
-                osp.join(
-                    data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png"
-                )
-            )
-            for name in tqdm(val_names, desc="Loading val covisibles")
-        ]
-    )
-    train_depths = np.array(
-        [
-            np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0]
-            for name in train_names
-        ]
-    )
-    train_Ks, train_w2cs = get_colmap_camera_params(
-        osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"),
-        [name + ".png" for name in train_names],
-    )
-    train_Ks = train_Ks[:, :3, :3]
-    scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item()
-    train_c2ws = np.linalg.inv(train_w2cs)
-    train_c2ws[:, :3, -1] *= scale
-    train_w2cs = np.linalg.inv(train_c2ws)
-    keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json")))
-    keypoints_2d = []
-    for keypoint_path in keypoint_paths:
-        with open(keypoint_path) as f:
-            keypoints_2d.append(json.load(f))
-    keypoints_2d = np.array(keypoints_2d)
-    keypoints_2d[..., :2] *= 2.0
-    time_ids = np.array(
-        [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths]
-    )
-    time_pairs = np.array(list(product(time_ids, repeat=2)))
-    index_pairs = np.array(list(product(range(len(time_ids)), repeat=2)))
-    keypoints_3d = []
-    for i, kps_2d in zip(time_ids, keypoints_2d):
-        K = train_Ks[i]
-        w2c = train_w2cs[i]
-        depth = train_depths[i]
-        is_kp_visible = kps_2d[:, 2] == 1
-        is_depth_valid = (
-            cv2.remap(
-                (depth != 0).astype(np.float32),
-                kps_2d[None, :, :2].astype(np.float32),
-                None,  # type: ignore
-                cv2.INTER_LINEAR,
-                borderMode=cv2.BORDER_CONSTANT,
-            )[0]
-            == 1
-        )
-        kp_depths = cv2.remap(
-            depth,  # type: ignore
-            kps_2d[None, :, :2].astype(np.float32),
-            None,  # type: ignore
-            cv2.INTER_LINEAR,
-            borderMode=cv2.BORDER_CONSTANT,
-        )[0]
-        kps_3d = (
-            np.einsum(
-                "ij,pj->pi",
-                np.linalg.inv(K),
-                np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1),
-            )
-            * kp_depths[:, None]
-        )
-        kps_3d = np.einsum(
-            "ij,pj->pi",
-            np.linalg.inv(w2c)[:3],
-            np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1),
-        )
-        kps_3d = np.concatenate(
-            [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1
-        )
-        kps_3d[kps_3d[:, -1] != 1] = 0.0
-        keypoints_3d.append(kps_3d)
-    keypoints_3d = np.array(keypoints_3d)
-    return {
-        "val_imgs": val_imgs,
-        "val_covisibles": val_covisibles,
-        "train_depths": train_depths,
-        "train_Ks": train_Ks,
-        "train_w2cs": train_w2cs,
-        "keypoints_2d": keypoints_2d,
-        "keypoints_3d": keypoints_3d,
-        "time_ids": time_ids,
-        "time_pairs": time_pairs,
-        "index_pairs": index_pairs,
-    }
-
-
-def load_result_dict(result_dir, val_names):
-    try:
-        pred_val_imgs = np.array(
-            [
-                iio.imread(osp.join(result_dir, "rgb", f"{name}.png"))
-                for name in val_names
-            ]
-        )
-    except OSError:
-        pred_val_imgs = None
-    try:
-        keypoints_dict = np.load(
-            osp.join(result_dir, "keypoints.npz"), allow_pickle=True
-        )
-        if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict:
-            keypoints_dict = keypoints_dict["arr_0"].item()
-        pred_keypoint_Ks = keypoints_dict["Ks"]
-        pred_keypoint_w2cs = keypoints_dict["w2cs"]
-        pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"]
-        pred_train_depths = keypoints_dict["pred_train_depths"]
-    except (OSError, KeyError):
-        print(
-            "No keypoints.npz found; make sure this is because the method itself "
-            "cannot produce keypoints."
-        )
-        keypoints_dict = {}
-        pred_keypoint_Ks = None
-        pred_keypoint_w2cs = None
-        pred_keypoints_3d = None
-        pred_train_depths = None
-
-    if "visibilities" in list(keypoints_dict.keys()):
-        pred_visibilities = keypoints_dict["visibilities"]
-    else:
-        pred_visibilities = None
-
-    return {
-        "pred_val_imgs": pred_val_imgs,
-        "pred_train_depths": pred_train_depths,
-        "pred_keypoint_Ks": pred_keypoint_Ks,
-        "pred_keypoint_w2cs": pred_keypoint_w2cs,
-        "pred_keypoints_3d": pred_keypoints_3d,
-        "pred_visibilities": pred_visibilities,
-    }
-
-
-def evaluate_3d_tracking(data_dict, result_dict):
-    train_Ks = data_dict["train_Ks"]
-    train_w2cs = data_dict["train_w2cs"]
-    keypoints_3d = data_dict["keypoints_3d"]
-    time_ids = data_dict["time_ids"]
-    time_pairs = data_dict["time_pairs"]
-    index_pairs = data_dict["index_pairs"]
-    pred_keypoint_Ks = result_dict["pred_keypoint_Ks"]
-    pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"]
-    pred_keypoints_3d = result_dict["pred_keypoints_3d"]
-    if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks):
-        print("Inconsistent camera intrinsics.")
-        print(train_Ks[time_ids][0], pred_keypoint_Ks[0])
-    keypoint_w2cs = train_w2cs[time_ids]
-    q, t, s = solve_procrustes(
-        torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to(
-            torch.float32
-        ),
-        torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32),
-    )[0]
-    R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1))
-    pred_keypoints_3d = np.einsum(
-        "ij,...j->...i",
-        rt_to_mat4(R, t, s).numpy().astype(np.float64),
-        np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1),
-    )
-    pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:]
-    # Compute 3D tracking metrics.
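-    # EPE is the mean per-pair Euclidean error over covisible keypoints;
-    # PCK (10cm / 5cm) is the fraction of covisible keypoints within
-    # 0.1 / 0.05 (in metric scale) of the ground truth.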
-    pair_keypoints_3d = keypoints_3d[index_pairs]
-    is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1)
-    target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3]
-    epes = []
-    for i in range(len(time_pairs)):
-        epes.append(
-            np.linalg.norm(
-                target_keypoints_3d[i][is_covisible[i]]
-                - pred_keypoints_3d[i][is_covisible[i]],
-                axis=-1,
-            )
-        )
-    epe = np.mean(
-        [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0]
-    ).item()
-    pck_3d_10cm = np.mean(
-        [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0]
-    ).item()
-    pck_3d_5cm = np.mean(
-        [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0]
-    ).item()
-    print(f"3D tracking EPE: {epe:.4f}")
-    print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}")
-    print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}")
-    print("-----------------------------")
-    return epe, pck_3d_10cm, pck_3d_5cm
-
-
-def project(Ks, w2cs, pts):
-    """
-    Args:
-        Ks: (N, 3, 3) camera intrinsics.
-        w2cs: (N, 4, 4) camera extrinsics.
-        pts: (N, N, M, 3) 3D points.
-    """
-    N = Ks.shape[0]
-    pts = pts.swapaxes(0, 1).reshape(N, -1, 3)
-
-    pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1)
-
-    # Apply world-to-camera transformation
-    pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes(
-        1, 2
-    )
-    # Project to image plane using intrinsic parameters
-    projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2)
-
-    depths = projected_pts[..., 2:3]
-    # Normalize homogeneous coordinates
-    projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None)
-    projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1)
-    depths = depths.reshape(N, N, -1).swapaxes(0, 1)
-    return projected_pts, depths
-
-
-def evaluate_2d_tracking(data_dict, result_dict):
-    train_w2cs = data_dict["train_w2cs"]
-    keypoints_2d = data_dict["keypoints_2d"]
-    visibilities = keypoints_2d[..., -1].astype(np.bool_)
-    time_ids = data_dict["time_ids"]
-    num_frames = len(time_ids)
-    num_pts = keypoints_2d.shape[1]
-    pred_train_depths = result_dict["pred_train_depths"]
-    pred_keypoint_Ks = result_dict["pred_keypoint_Ks"]
-    pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"]
-    pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape(
-        num_frames, -1, num_pts, 3
-    )
-    keypoint_w2cs = train_w2cs[time_ids]
-    s = solve_procrustes(
-        torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to(
-            torch.float32
-        ),
-        torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32),
-    )[0][-1].item()
-
-    target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2]
-    target_visibilities = visibilities[None].repeat(num_frames, axis=0)
-
-    pred_points, pred_depths = project(
-        pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d
-    )
-    if result_dict["pred_visibilities"] is not None:
-        pred_visibilities = result_dict["pred_visibilities"].reshape(
-            num_frames, -1, num_pts
-        )
-    else:
-        rendered_depths = []
-        for i, points in zip(
-            data_dict["index_pairs"][:, -1],
-            pred_points.reshape(-1, pred_points.shape[2], 2),
-        ):
-            rendered_depths.append(
-                cv2.remap(
-                    pred_train_depths[i].astype(np.float32),
-                    points[None].astype(np.float32),  # type: ignore
-                    None,  # type: ignore
-                    cv2.INTER_LINEAR,
-                    borderMode=cv2.BORDER_CONSTANT,
-                )[0]
-            )
-        rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts)
-        pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05
-
-    one_hot_eye = np.eye(target_points.shape[0])[..., None].repeat(num_pts, axis=-1)
-    evaluation_points = one_hot_eye == 0
-    for i in range(num_frames):
-        evaluation_points[i, :, ~visibilities[i]] = False
-    occ_acc = np.sum(
-        np.equal(pred_visibilities, target_visibilities) & evaluation_points
-    ) / np.sum(evaluation_points)
-    all_frac_within = []
-    all_jaccard = []
-
-    for thresh in [4, 8, 16, 32, 64]:
-        within_dist = np.sum(
-            np.square(pred_points - target_points),
-            axis=-1,
-        ) < np.square(thresh)
-        is_correct = np.logical_and(within_dist, target_visibilities)
-        count_correct = np.sum(is_correct & evaluation_points)
-        count_visible_points = np.sum(target_visibilities & evaluation_points)
-        frac_correct = count_correct / count_visible_points
-        all_frac_within.append(frac_correct)
-
-        true_positives = np.sum(is_correct & pred_visibilities & evaluation_points)
-        gt_positives = np.sum(target_visibilities & evaluation_points)
-        false_positives = (~target_visibilities) & pred_visibilities
-        false_positives = false_positives | ((~within_dist) & pred_visibilities)
-        false_positives = np.sum(false_positives & evaluation_points)
-        jaccard = true_positives / (gt_positives + false_positives)
-        all_jaccard.append(jaccard)
-    AJ = np.mean(all_jaccard)
-    APCK = np.mean(all_frac_within)
-
-    print(f"2D tracking AJ: {AJ:.4f}")
-    print(f"2D tracking avg PCK: {APCK:.4f}")
-    print(f"2D tracking occlusion accuracy: {occ_acc:.4f}")
-    print("-----------------------------")
-    return AJ, APCK, occ_acc
-
-
-def evaluate_nv(data_dict, result_dict):
-    device = "cuda"
-    psnr_metric = mPSNR().to(device)
-    ssim_metric = mSSIM().to(device)
-    lpips_metric = mLPIPS().to(device)
-
-    val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device)
-    val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device)
-    pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device)
-
-    for i in range(len(val_imgs)):
-        val_img = val_imgs[i] / 255.0
-        pred_val_img = pred_val_imgs[i] / 255.0
-        val_covisible = val_covisibles[i] / 255.0
-        psnr_metric.update(val_img, pred_val_img, val_covisible)
-        ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None])
-        lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None])
-    mpsnr = psnr_metric.compute().item()
-    mssim = ssim_metric.compute().item()
-    mlpips = lpips_metric.compute().item()
-    print(f"NV mPSNR: {mpsnr:.4f}")
-    print(f"NV mSSIM: {mssim:.4f}")
-    print(f"NV mLPIPS: {mlpips:.4f}")
-    return mpsnr, mssim, mlpips
-
-
-if __name__ == "__main__":
-    seq_names = args.seq_names
-
-    epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], []
-    AJ_all, APCK_all, occ_acc_all = [], [], []
-    mpsnr_all, mssim_all, mlpips_all = [], [], []
-
-    for seq_name in seq_names:
-        print("=========================================")
-        print(f"Evaluating {seq_name}")
-        print("=========================================")
-        data_dir = osp.join(args.data_dir, seq_name)
-        if not osp.exists(data_dir):
-            data_dir = args.data_dir
-        if not osp.exists(data_dir):
-            raise ValueError(f"Data directory {data_dir} not found.")
-        result_dir = osp.join(args.result_dir, seq_name, "results/")
-        if not osp.exists(result_dir):
-            result_dir = osp.join(args.result_dir, "results/")
-        if not osp.exists(result_dir):
-            raise ValueError(f"Result directory {result_dir} not found.")
-
-        with open(osp.join(data_dir, "splits/train.json")) as f:
-            train_names = json.load(f)["frame_names"]
-        with open(osp.join(data_dir, "splits/val.json")) as f:
-            val_names = json.load(f)["frame_names"]
-
-        data_dict = load_data_dict(data_dir, train_names, val_names)
-        result_dict = load_result_dict(result_dir, val_names)
-        if result_dict["pred_keypoints_3d"] is not None:
-            epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict)
-            AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict)
-            epe_all.append(epe)
-            pck_3d_10cm_all.append(pck_3d_10cm)
-            pck_3d_5cm_all.append(pck_3d_5cm)
-            AJ_all.append(AJ)
-            APCK_all.append(APCK)
-            occ_acc_all.append(occ_acc)
-        if len(data_dict["val_imgs"]) > 0:
-            if result_dict["pred_val_imgs"] is None:
-                print("No NV results found.")
-                continue
-            mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict)
-            mpsnr_all.append(mpsnr)
-            mssim_all.append(mssim)
-            mlpips_all.append(mlpips)
-
-    print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}")
-    print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}")
-    print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}")
-    print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}")
-    print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}")
-    print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}")
-    print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}")
-    print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}")
-    print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}")
diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/__init__.py b/som_out/bear/code/2024-10-25-235122/flow3d/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/configs.cpython-310.pyc
deleted file mode 100644
index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/init_utils.cpython-310.pyc
deleted file mode 100644
index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/loss_utils.cpython-310.pyc
deleted file mode 100644
index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/metrics.cpython-310.pyc
deleted file mode 100644
index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ
diff --git
a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/params.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/configs.py b/som_out/bear/code/2024-10-25-235122/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig - motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float = 
0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/data/__init__.py b/som_out/bear/code/2024-10-25-235122/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 index 375f888d320380a5c331b6d14fff062e4925c21b..0000000000000000000000000000000000000000 Binary files 
a/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/colmap.cpython-310.pyc deleted file mode 100644 index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc deleted file mode 100644 index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/data/base_dataset.py b/som_out/bear/code/2024-10-25-235122/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... - - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ... 
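-    # Entries handled specially in train_collate_fn below vary in length across
-    # samples, so they are kept as lists; everything else is stacked with
-    # default_collate.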
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/data/casual_dataset.py b/som_out/bear/code/2024-10-25-235122/flow3d/data/casual_dataset.py deleted file mode 100644 index db367682ee99a59fd914240269d8f8e13e00bd1f..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = depth_type - self.num_targets_per_frame = 
num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return self.depths[index] / self.scale - - def load_image(self, index) -> 
torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, dtype=torch.float32), - torch.arange(0, H, dtype=torch.float32), - indexing="xy", 
- ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/data/colmap.py b/som_out/bear/code/2024-10-25-235122/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = {v.name: k for k, v in 
images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
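-    Example: read_next_bytes(fid, 24, "iiQQ") unpacks two int32 and two uint64 values.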
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/data/iphone_dataset.py b/som_out/bear/code/2024-10-25-235122/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.scene_norm_dict = 
scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
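- # Depth convention (for load_depth below): "lidar" depths are read - # directly from depth/{factor}x/<frame>.npy, while the monocular - # variants come from flow3d_preprocessed/aligned_<depth_type>/ and are - # inverted after loading (values clamped to >= 1e-3, then depth = 1/x), - # i.e. the files apparently store inverse depth. Loaded depths are - # then clamped to 2.5x the median of the per-frame maxima to suppress - # far-plane outliers.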
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
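- # A minimal sketch of the transform assembled below: the scene center - # is the mean of the subsampled foreground track points, the scale is - # half the largest extent between the 5% and 95% quantiles, and the - # rotation R maps the mean camera up vector (the negated second row of - # the w2c rotations) onto world +z via an axis-angle vector - # (axis = normalize(up x z), angle = arccos(up . z)); the cached - # transform is then rt_to_mat4(R, -R @ scene_center).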
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
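- # The 2D track files live at - # flow3d_preprocessed/2d_tracks/{factor}x/{query}_{target}.npy, one - # array of shape (num_tracks, 4) per (query, target) frame pair; as - # consumed below, the four channels are (x, y, occlusion logit, - # expected-distance logit) in TAPIR's convention.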
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
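- # Per query frame i, the loop below subsamples roughly - # num_samples / num_sampled_frames query points (the last frame takes - # the remainder) and gathers their correspondences in every other - # sampled frame j from the {frame_i}_{frame_j}.npy files, stacking - # them into a (num_points, num_sampled_frames, 4) tensor per query frame.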
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
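- # (An empty TensorDataset stands in when the split has no validation - # frames, so the dataloader plumbing stays uniform.)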
self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/data/utils.py b/som_out/bear/code/2024-10-25-235122/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. - - Args: - occlusions: [-inf, inf], np.float32 - expected_dist: [-inf, inf], np.float32 - - Returns: - visibles: bool - """ - - def sigmoid(x): - if isinstance(x, np.ndarray): - return 1 / (1 + np.exp(-x)) - else: - return torch.sigmoid(x) - - visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5 - return visibles - - -def parse_tapir_track_info(occlusions, expected_dist): - """ - return: - valid_visible: mask of visible & confident points - valid_invisible: mask of invisible & confident points - confidence: clamped confidence scores (all < 0.5 -> 0) - """ - visibility = 1 - torch.sigmoid(occlusions) - confidence = 1 - torch.sigmoid(expected_dist) - valid_visible = visibility * confidence > 0.5 - valid_invisible = (1 - visibility) * confidence > 0.5 - # set all confidence < 0.5 to 0 - confidence = confidence * (valid_visible | valid_invisible).float() - return valid_visible, valid_invisible, confidence - - -def get_tracks_3d_for_query_frame( - query_index: int, - query_img: torch.Tensor, - tracks_2d: torch.Tensor, - depths: torch.Tensor, - masks: torch.Tensor, - inv_Ks: torch.Tensor, - c2ws: torch.Tensor, -): - """ - :param query_index (int) - :param query_img [H, W, 3] - :param tracks_2d [N, T, 4] - :param depths [T, H, W] - :param masks [T, H, W] - :param inv_Ks [T, 3, 3] - :param c2ws [T, 4, 4] - returns ( - tracks_3d [N, T, 3] - track_colors [N, 3] - visibles [N, T] - invisibles [N, T] - confidences [N, T] - ) - """ - T, H, W = depths.shape - query_img = query_img[None].permute(0, 3, 1, 2) # (1, 3, H, W) - tracks_2d = tracks_2d.swapaxes(0, 1) # (T, N, 4) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - # (T, N), (T, N), (T, N) - visibles,
invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - track_depths = F.grid_sample( - depths[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths[..., None] - ) - tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[ - ..., :3 - ] - # Filter out out-of-mask tracks. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - is_in_masks = ( - F.grid_sample( - masks[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - )[:, 0, 0] - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - - # valid if in the fg mask at least 40% of the time - # in_mask_counts = is_in_masks.sum(0) - # t = 0.25 - # thresh = min(t * T, in_mask_counts.float().quantile(t).item()) - # valid = in_mask_counts > thresh - valid = is_in_masks[query_index] - # valid if visible 5% of the time - visible_counts = visibles.sum(0) - valid = valid & ( - visible_counts - >= min( - int(0.05 * T), - visible_counts.float().quantile(0.1).item(), - ) - ) - - # Get track's color from the query frame. - # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. 
Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. - If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. 
-def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] - ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/init_utils.py b/som_out/bear/code/2024-10-25-235122/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from 
flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes - from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - - def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor - ) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the canonical frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. - opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - - def init_bg( - points: StaticObservations, - ) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkgd_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors for each point and use that as the - # scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkgd_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals.
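- # The rotation aligning the local +z axis with each point normal n is - # built as an axis-angle vector (axis = z x n, angle = arccos(z . n)), - # converted to a unit quaternion, and rolled from roma's xyzw order - # into the wxyz order used elsewhere in this codebase.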
- local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals, dim=-1), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkgd_scales, - bkgd_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t -
procrustes_weights = ( - weights[cano_t] - * weights[cur_t] - * (confidences[cano_t] + confidences[cur_t]) - / 2 - ) - if procrustes_weights.sum() < min_mean_weight * num_frames: - init_rots[n, cur_t] = init_rots[n, prev_t] - init_ts[n, cur_t] = init_ts[n, prev_t] - cluster_skip_ts.append(cur_t) - else: - se3, (err, err_before) = solve_procrustes( - cluster[cano_t], - cluster[cur_t], - weights=procrustes_weights, - enforce_se3=True, - rot_type=rot_type, - ) - init_rot, init_t, _ = se3 - assert init_rot.shape[-1] == rot_dim - # double cover - if rot_type == "quat" and torch.linalg.norm( - init_rot - init_rots[n][prev_t] - ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]): - init_rot = -init_rot - init_rots[n, cur_t] = init_rot - init_ts[n, cur_t] = init_t - if np.isnan(err): - print(f"{cur_t=} {err=}") - print(f"{procrustes_weights.isnan().sum()=}") - if np.isnan(err_before): - print(f"{cur_t=} {err_before=}") - print(f"{procrustes_weights.isnan().sum()=}") - errs_after[n, cur_t] = err - errs_before[n, cur_t] = err_before - prev_t = cur_t - skipped_ts[cluster_id.item()] = cluster_skip_ts - - guru.info(f"{skipped_ts=}") - guru.info( - "procrustes init median error: {:.5f} => {:.5f}".format( - np.median(errs_before[errs_before > 0]), - np.median(errs_after[errs_after > 0]), - ) - ) - guru.info( - "procrustes init mean error: {:.5f} => {:.5f}".format( - np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0]) - ) - ) - guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}") - - if vis: - server = get_server(port) - center_idcs = torch.argmin(dists2centers, dim=0) - print(f"{dists2centers.shape=} {center_idcs.shape=}") - vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs]) - vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks") - import ipdb - - ipdb.set_trace() - - bases = MotionBases(init_rots, init_ts) - return bases, motion_coefs, tracks_3d - - -def run_initial_optim( - fg: GaussianParams, - bases: MotionBases, - tracks_3d: TrackObservations, - Ks: torch.Tensor, - w2cs: torch.Tensor, - num_iters: int = 1000, - use_depth_range_loss: bool = False, -): - """ - :param motion_rots: [num_bases, num_frames, 4|6] - :param motion_transls: [num_bases, num_frames, 3] - :param motion_coefs: [num_bases, num_frames] - :param means: [num_gaussians, 3] - """ - optimizer = torch.optim.Adam( - [ - {"params": bases.params["rots"], "lr": 1e-2}, - {"params": bases.params["transls"], "lr": 3e-2}, - {"params": fg.params["motion_coefs"], "lr": 1e-2}, - {"params": fg.params["means"], "lr": 1e-3}, - ], - ) - scheduler = torch.optim.lr_scheduler.ExponentialLR( - optimizer, gamma=0.1 ** (1 / num_iters) - ) - G = fg.params.means.shape[0] - num_frames = bases.num_frames - device = bases.params["rots"].device - - w_smooth_func = lambda i, min_v, max_v, th: ( - min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v - ) - - gt_2d, gt_depth = project_2d_tracks( - tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - # (G, T, 2) - gt_2d = gt_2d.swapaxes(0, 1) - # (G, T) - gt_depth = gt_depth.swapaxes(0, 1) - - ts = torch.arange(0, num_frames, device=device) - ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1)) # i (3B,) - - pbar = tqdm(range(0, num_iters)) - for i in pbar: - coefs = fg.get_coefs() - transfms = bases.compute_transforms(ts, coefs) - positions = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0,
1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. - w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = 
init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / 
(cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/loss_utils.py b/som_out/bear/code/2024-10-25-235122/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return 
torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - 
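The `quantile` arguments in the masked losses above exist to trim gross outliers (bad tracks, depth speckle) before averaging. A minimal sketch of that trimming on made-up residuals; the 20% outlier rate and 0.9 quantile are illustrative, not values from this codebase:

```python
import torch
import torch.nn.functional as F

# Made-up residuals: 80 inliers with zero error, 20 gross outliers.
pred = torch.zeros(100, 3)
gt = torch.zeros(100, 3)
gt[:20] += 100.0

per_point = F.l1_loss(pred, gt, reduction="none").mean(dim=-1)  # (100,)
cutoff = torch.quantile(per_point, 0.9)         # 100.0 here
trimmed = per_point[per_point < cutoff].mean()  # outliers excluded
print(per_point.mean().item(), trimmed.item())  # 20.0 vs 0.0
```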
# acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/metrics.py b/som_out/bear/code/2024-10-25-235122/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/metrics.py +++ /dev/null @@ -1,313 +0,0 @@ -from typing import Literal - -import numpy as np -import torch -import torch.nn.functional as F -from torchmetrics.functional.image.lpips import _NoTrainLpips -from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure -from torchmetrics.metric import Metric -from torchmetrics.utilities import dim_zero_cat -from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE - - -def compute_psnr( - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, -) -> float: - """ - Args: - preds (torch.Tensor): (..., 3) predicted images in [0, 1]. - targets (torch.Tensor): (..., 3) target images in [0, 1]. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - - Returns: - psnr (float): Peak signal-to-noise ratio. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - return ( - -10.0 - * torch.log( - F.mse_loss( - preds * masks[..., None], - targets * masks[..., None], - reduction="sum", - ) - / masks.sum().clamp(min=1.0) - / 3.0 - ) - / np.log(10.0) - ).item() - - -def compute_pose_errors( - preds: torch.Tensor, targets: torch.Tensor -) -> tuple[float, float, float]: - """ - Args: - preds: (N, 4, 4) predicted camera poses. - targets: (N, 4, 4) target camera poses. - - Returns: - ate (float): Absolute trajectory error. - rpe_t (float): Relative pose error in translation. - rpe_r (float): Relative pose error in rotation (degree). - """ - # Compute ATE. - ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item() - # Compute RPE_t and RPE_r. - # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r. - # torch has numerical issues for acos when the value is close to 1.0, i.e. - # RPE_r is supposed to be very small, and will result in artificially large - # error. 
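The note above deserves a concrete illustration. The relative rotation error is recovered as theta = arccos((trace(R) - 1) / 2); for near-identity errors the argument sits at the steep edge of arccos's domain, so float32 round-off destroys small angles. A quick numpy sketch (the 0.01-degree rotation is a made-up test value):

```python
import numpy as np

def rotation_angle_deg(R: np.ndarray) -> float:
    """Geodesic angle of a rotation matrix, in degrees."""
    cos = np.clip((np.trace(R) - 1.0) / 2.0, -1.0, 1.0)
    return float(np.degrees(np.arccos(cos)))

t = np.radians(0.01)  # a 0.01 degree rotation about z
R = np.array(
    [[np.cos(t), -np.sin(t), 0.0], [np.sin(t), np.cos(t), 0.0], [0.0, 0.0, 1.0]]
)
print(rotation_angle_deg(R))  # ~0.01 in float64
# In float32 the cosine rounds to a neighbor of 1.0, so the recovered angle is
# quantized to coarse steps: here it collapses to 0.0, and one ulp lower it
# would read ~0.02 degrees. Hence the numpy/float64 path described above.
print(rotation_angle_deg(R.astype(np.float32)))  # 0.0
```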
- preds = preds.detach().cpu().numpy() - targets = targets.detach().cpu().numpy() - pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:] - target_rels = np.linalg.inv(targets[:-1]) @ targets[1:] - error_rels = np.linalg.inv(target_rels) @ pred_rels - traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1) - rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item() - rpe_r = ( - np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item() - / np.pi - * 180.0 - ) - return ate, rpe_t, rpe_r - - -class mPSNR(PeakSignalNoiseRatio): - sum_squared_error: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__(self, **kwargs) -> None: - super().__init__( - data_range=1.0, - base=10.0, - dim=None, - reduction="elementwise_mean", - **kwargs, - ) - self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (..., 3) float32 predicted images. - targets (torch.Tensor): (..., 3) float32 target images. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - self.sum_squared_error.append( - torch.sum(torch.pow((preds - targets) * masks[..., None], 2)) - ) - self.total.append(masks.sum().to(torch.int64) * 3) - - def compute(self) -> torch.Tensor: - """Compute peak signal-to-noise ratio over state.""" - sum_squared_error = dim_zero_cat(self.sum_squared_error) - total = dim_zero_cat(self.total) - return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0) - - -class mSSIM(StructuralSimilarityIndexMeasure): - similarity: list - - def __init__(self, **kwargs) -> None: - super().__init__( - reduction=None, - data_range=1.0, - return_full_image=False, - **kwargs, - ) - assert isinstance(self.sigma, float) - - def __len__(self) -> int: - return sum([s.shape[0] for s in self.similarity]) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional binary masks where - the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - - # Construct a 1D Gaussian blur filter. - assert isinstance(self.kernel_size, int) - hw = self.kernel_size // 2 - shift = (2 * hw - self.kernel_size + 1) / 2 - assert isinstance(self.sigma, float) - f_i = ( - (torch.arange(self.kernel_size, device=preds.device) - hw + shift) - / self.sigma - ) ** 2 - filt = torch.exp(-0.5 * f_i) - filt /= torch.sum(filt) - - # Blur in x and y (faster than the 2D convolution). - def convolve2d(z, m, f): - # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf).
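`mPSNR` above keeps a masked sum of squared errors plus a masked element count so the metric stays exact across batches. The closed form it implements, as a hedged one-shot sketch on synthetic images (resolution and corruption are made up):

```python
import torch

def masked_psnr(pred: torch.Tensor, gt: torch.Tensor, mask: torch.Tensor) -> float:
    """pred/gt: (H, W, 3) images in [0, 1]; mask: (H, W) binary."""
    sse = (((pred - gt) * mask[..., None]) ** 2).sum()
    mse = sse / (mask.sum().clamp(min=1.0) * 3.0)
    return (-10.0 * torch.log10(mse)).item()

pred = torch.rand(64, 64, 3) * 0.5
gt = pred.clone()
gt[:32] += 0.1  # corrupt only the top half
mask_top = torch.zeros(64, 64)
mask_top[:32] = 1.0
print(masked_psnr(pred, gt, torch.ones(64, 64)))  # ~23 dB over the full image
print(masked_psnr(pred, gt, mask_top))            # ~20 dB over the corrupted half
```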
- z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. - # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. 
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. - """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/params.py b/som_out/bear/code/2024-10-25-235122/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if 
f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] 
== 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/renderer.py b/som_out/bear/code/2024-10-25-235122/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. - work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/scene_model.py b/som_out/bear/code/2024-10-25-235122/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def 
__init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = 
GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). - target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = 
colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. - self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/tensor_dataclass.py b/som_out/bear/code/2024-10-25-235122/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
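Every convenience on the dataclasses below (`TrackObservations`, `StaticObservations`) funnels through this one recursive `map`. A small usage sketch, assuming the `TrackObservations` defined just after this; the track and frame counts are hypothetical:

```python
import torch

# Hypothetical container: 100 tracks observed over 24 frames.
obs = TrackObservations(
    xyz=torch.randn(100, 24, 3),
    visibles=torch.ones(100, 24, dtype=torch.bool),
    invisibles=torch.zeros(100, 24, dtype=torch.bool),
    confidences=torch.ones(100, 24),
    colors=torch.rand(100, 3),
)
assert obs.check_sizes()

subset = obs[:10]  # __getitem__ maps x -> x[:10] over every tensor field
if torch.cuda.is_available():
    obs = obs.to("cuda")  # .to maps x -> x.to(device) over every tensor field
kept = obs.filter_valid(obs.confidences[:, 0] > 0.5)  # boolean-masks every field
```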
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/trainer.py b/som_out/bear/code/2024-10-25-235122/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
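Both `render_fn` implementations (here and in `Renderer`) derive pinhole intrinsics from the viewer's vertical field of view as `focal = 0.5 * H / tan(0.5 * fov)`, with the principal point at the image center. A standalone numpy check; the 90-degree fov and 640x480 resolution are arbitrary:

```python
import numpy as np

def intrinsics_from_fov(fov_rad: float, W: int, H: int) -> np.ndarray:
    """Pinhole K for a vertical field of view, principal point at the center."""
    focal = 0.5 * H / np.tan(0.5 * fov_rad)
    return np.array(
        [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]]
    )

K = intrinsics_from_fov(np.pi / 2, 640, 480)  # 90 deg fov: focal == H / 2 == 240
print(K)
```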
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
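The 2D track loss above boils down to projecting the rendered 3D track channels with the target intrinsics and dividing by clamped depth. A minimal sketch of that projection step; the shapes and identity intrinsics are hypothetical:

```python
import torch

def project_tracks(tracks_3d: torch.Tensor, Ks: torch.Tensor, eps: float = 1e-6):
    """tracks_3d: (B, P, 3) points already in camera coordinates; Ks: (B, 3, 3).
    Returns (B, P, 2) pixel coordinates and (B, P, 1) depths."""
    cam = torch.einsum("bij,bpj->bpi", Ks, tracks_3d)  # (B, P, 3)
    depth = cam[..., 2:].clamp(min=eps)                # avoid divide-by-zero
    return cam[..., :2] / depth, depth

Ks = torch.eye(3).expand(2, 3, 3).clone()
pts = torch.tensor([[[0.5, 0.25, 2.0]]]).expand(2, 1, 3)
uv, z = project_tracks(pts, Ks)
print(uv)  # [[0.25, 0.125]] per batch: x/z, y/z
```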
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
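Supervising depth in disparity (inverse depth) space, as above, balances the loss between near and far geometry. A hedged sketch of the core term, showing that a 1 m error up close costs far more than the same error at range:

```python
import torch

def disparity_l1(pred_depth: torch.Tensor, gt_depth: torch.Tensor,
                 mask: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    """L1 between inverse depths over masked pixels. All inputs are (B, H, W)."""
    pred_disp = 1.0 / (pred_depth + eps)
    gt_disp = 1.0 / (gt_depth + eps)
    err = (pred_disp - gt_disp).abs() * mask
    return err.sum() / mask.sum().clamp(min=1.0)

ones = torch.ones(1, 1, 1)
print(disparity_l1(3.0 * ones, 2.0 * ones, ones))    # ~0.167: 1 m error at 2 m
print(disparity_l1(51.0 * ones, 50.0 * ones, ones))  # ~0.0004: 1 m error at 50 m
```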
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
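The gating in `run_control_steps` reduces to a few modular checks on the global step. Factored out below as a pure function for readability; the default constants are illustrative stand-ins for the `OptimizerConfig` fields, not the exact training values:

```python
def control_actions(step: int, *, warmup: int = 200, every: int = 100,
                    stop_control: int = 4000, stop_densify: int = 15000,
                    reset_opacity_every: int = 3000, num_frames: int = 80) -> set[str]:
    """Which adaptive-control actions fire at this step (a sketch, not the exact config)."""
    actions: set[str] = set()
    if step <= warmup or step % every != 0 or step >= stop_control:
        return actions
    if step < stop_densify and step % reset_opacity_every > num_frames:
        actions.add("densify")
    if step % reset_opacity_every > min(3 * num_frames, 1000):
        actions.add("cull")
    if step % reset_opacity_every == 0:
        actions.add("reset_opacity")
    return actions

print(control_actions(3000))  # {'reset_opacity'}: right at the reset boundary
print(control_actions(3100))  # {'densify'}: too soon after a reset to cull
```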
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/trajectories.py b/som_out/bear/code/2024-10-25-235122/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
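# A standalone sketch of the optimizer-state surgery that dup_in_optim /
# remove_from_optim above perform (not part of the original diff): when
# gaussians are split, duplicated, or culled, Adam's per-row moments must be
# re-indexed the same way as the parameter rows, with zeros for new rows.
import torch

param = torch.nn.Parameter(torch.randn(5, 3))
opt = torch.optim.Adam([param], lr=1e-2)
param.sum().backward()
opt.step()  # populates exp_avg / exp_avg_sq in opt.state[param]

state = opt.state[param]
keep = torch.tensor([True, True, False, True, True])  # drop the split row
num_new = 2  # rows appended for the two children of the split
for key in ("exp_avg", "exp_avg_sq"):
    old = state[key]
    state[key] = torch.cat([old[keep], old.new_zeros(num_new, 3)], dim=0)
# The parameter tensor itself must be rebuilt with the same row layout and
# swapped into opt.param_groups, exactly as the helpers above do.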
Compute the y axis (as z and x are normalized, y is already of norm 1) - y = torch.cross(z, x, dim=-1) # (3) - avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center) - avg_w2c = torch.linalg.inv(avg_c2w) - return avg_w2c - - -def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor: - """Triangulate a set of rays to find a single lookat point. - - Args: - origins (torch.Tensor): A (N, 3) array of ray origins. - viewdirs (torch.Tensor): A (N, 3) array of ray view directions. - - Returns: - torch.Tensor: A (3,) lookat point. - """ - - viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1) - eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None] - # Calculate projection matrix I - rr^T - I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :]) - # Compute sum of projections - sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3) - # Solve for the intersection point using least squares - lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0] - # Check NaNs. - assert not torch.any(torch.isnan(lookat)) - return lookat - - -def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor): - """ - Args: - positions: (N, 3) tensor of camera positions - lookat: (3,) tensor of lookat point - up: (3,) tensor of up vector - - Returns: - w2cs: (N, 3, 3) tensor of world to camera rotation matrices - """ - forward_vectors = F.normalize(lookat - positions, dim=-1) - right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1) - down_vectors = F.normalize( - torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1 - ) - Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1) - w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions)) - return w2cs - - -def get_arc_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_position = torch.linalg.inv(ref_w2c)[:3, 3] - thetas = ( - torch.sin( - torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[ - :-1 - ] - ) - * (degree / 2.0) - / 180.0 - * torch.pi - ) - positions = torch.einsum( - "nij,j->ni", - roma.rotvec_to_rotmat(thetas[:, None] * up[None]), - ref_position - lookat, - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_lemniscate_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi) - # Lemniscate curve in camera space. Starting at the origin. - thetas = ( - torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1] - + torch.pi / 2 - ) - positions = torch.stack( - [ - a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2), - a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2), - torch.zeros(num_frames, device=ref_w2c.device), - ], - dim=-1, - ) - # Transform to world space. 
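# A quick standalone check of the get_lookat least-squares triangulation
# above (not part of the original diff): for rays (o_i, d_i) it solves the
# normal equations  sum_i (I - d_i d_i^T) p = sum_i (I - d_i d_i^T) o_i,
# i.e. p minimizes the total squared distance to all rays. Two rays that
# intersect at (1, 1, 0):
import torch
import torch.nn.functional as F

origins = torch.tensor([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
viewdirs = F.normalize(torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]), dim=-1)
eye = torch.eye(3)[None]
I_min_cov = eye - viewdirs[..., None] * viewdirs[..., None, :]
rhs = I_min_cov.matmul(origins[..., None]).sum(dim=-3)
lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), rhs).solution[..., 0]
assert torch.allclose(lookat, torch.tensor([1.0, 1.0, 0.0]), atol=1e-5)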
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/transforms.py b/som_out/bear/code/2024-10-25-235122/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transfm to be SE3. - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - q (torch.Tensor): (4,), rotation component in quaternion of WXYZ - format. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (torch.Tensor): (), average L2 distance after alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error. 
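# Not part of the original diff: the 6D rotation parameterization above
# (continuous representation of Zhou et al., CVPR 2019) round-trips exactly
# for any rotation matrix, since Gram-Schmidt recovers the first two columns.
# A standalone check, inlining rmat_to_cont_6d and cont_6d_to_rmat:
import math

import torch
import torch.nn.functional as F

c, s = math.cos(0.7), math.sin(0.7)
R = torch.tensor([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

six_d = torch.cat([R[..., 0], R[..., 1]], dim=-1)       # rmat_to_cont_6d(R)
x = F.normalize(six_d[0:3], dim=-1)
y1 = six_d[3:6]
y = F.normalize(y1 - (y1 * x).sum(-1, keepdim=True) * x, dim=-1)
z = torch.linalg.cross(x, y, dim=-1)
R_rec = torch.stack([x, y, z], dim=-1)                  # cont_6d_to_rmat
assert torch.allclose(R_rec, R, atol=1e-6)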
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/validator.py b/som_out/bear/code/2024-10-25-235122/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
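# Not part of the original diff: the masked metrics used below come from
# flow3d.metrics, which this diff does not show. Conceptually, a masked PSNR
# averages squared error over valid pixels only -- a hypothetical minimal
# version, not the project's mPSNR:
import torch

def masked_psnr(pred: torch.Tensor, gt: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    """pred / gt: (..., H, W, 3) in [0, 1]; mask: (..., H, W)."""
    err = ((pred - gt) ** 2).mean(dim=-1)                  # (..., H, W)
    mse = (err * mask).sum() / mask.sum().clamp_min(1.0)
    return -10.0 * torch.log10(mse.clamp_min(1e-10))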
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
- t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (3, 3). - img = batch["imgs"][0] - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "motion_coefs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/vis/__init__.py b/som_out/bear/code/2024-10-25-235122/flow3d/vis/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/vis/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc deleted file mode 100644 index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/vis/__pycache__/render_panel.cpython-310.pyc deleted file mode 100644 index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/vis/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/bear/code/2024-10-25-235122/flow3d/vis/__pycache__/viewer.cpython-310.pyc deleted file mode 100644 index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235122/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/vis/playback_panel.py b/som_out/bear/code/2024-10-25-235122/flow3d/vis/playback_panel.py deleted file mode 100644 index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/vis/playback_panel.py +++ /dev/null @@ -1,68 +0,0 @@ -import threading -import time - -import viser - - -def add_gui_playback_group( - server: viser.ViserServer, - num_frames: int, - min_fps: float = 1.0, - max_fps: float = 60.0, - fps_step: float = 0.1, - initial_fps: float = 10.0, -): - 
gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/vis/render_panel.py b/som_out/bear/code/2024-10-25-235122/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
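# Not part of the original diff: the playback panel above drives the timestep
# slider from a daemon thread that only advances while the Pause button is
# visible (button visibility doubles as the "is playing" flag). The bare
# pattern, with the GUI specifics abstracted into callables:
import threading
import time

def start_player(advance, is_playing, get_fps):
    def _loop():
        while True:
            if is_playing():
                advance()
            time.sleep(1.0 / get_fps())
    threading.Thread(target=_loop, daemon=True).start()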
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
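# Not part of the original diff: the fly-to animation below walks the SE(3)
# geodesic from the current camera pose to the target. With
# T_ct = T_wc^-1 @ T_wt, the pose at fraction s in [0, 1] is
# T_wc @ exp(s * log(T_ct)); s = j / 9 gives the ten steps used below.
# Assumes viser.transforms.SE3.identity() alongside the calls already used
# in this file:
import numpy as np
import viser.transforms as tf

T_wc = tf.SE3.identity()
T_wt = tf.SE3.from_translation(np.array([1.0, 0.0, 0.0]))
T_ct = T_wc.inverse() @ T_wt
halfway = T_wc @ tf.SE3.exp(T_ct.log() * 0.5)
assert np.allclose(halfway.translation(), [0.5, 0.0, 0.0], atol=1e-6)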
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
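# Not part of the original diff: PCHIP is used in spline_t_from_t_sec below
# because the mapping from seconds to keyframe index must be monotone -- an
# overshooting cubic would make the camera run backwards between keyframes.
# A standalone check of that monotonicity:
import numpy as np
import scipy.interpolate

t_cumsum = np.array([0.0, 2.0, 2.5, 6.0])    # cumulative transition times
to_index = scipy.interpolate.PchipInterpolator(t_cumsum, np.arange(4))
samples = to_index(np.linspace(0.0, 6.0, 100))
assert np.all(np.diff(samples) >= 0.0)       # monotone non-decreasing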
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
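# Not part of the original diff: the path preview below resamples a
# Kochanek-Bartels spline, whose tcb=(tension, 0, 0) triple is what the
# "Spline tension" slider controls. Evaluating one standalone (assuming
# evaluate() accepts an array of parameter values, as the code below does):
import numpy as np
import splines

keyframe_positions = [
    np.array([0.0, 0.0, 0.0]),
    np.array([1.0, 0.0, 0.0]),
    np.array([1.0, 1.0, 0.0]),
]
spline = splines.KochanekBartels(
    keyframe_positions, tcb=(0.5, 0.0, 0.0), endconditions="natural"
)
pts = spline.evaluate(np.linspace(0.0, len(keyframe_positions) - 1, 50))
assert pts.shape == (50, 3)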
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
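# Not part of the original diff: _make_transform_controls_callback below is a
# deliberate closure factory. Registering the callback directly inside the
# for-loop would late-bind `keyframe`, so every move handle would edit the
# last keyframe. The general Python pitfall and its fix:
fns_buggy = [lambda: i for i in range(3)]
assert [f() for f in fns_buggy] == [2, 2, 2]      # all share the final i

fns_fixed = [lambda j=i: j for i in range(3)]     # freeze i per iteration
assert [f() for f in fns_fixed] == [0, 1, 2]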
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
- Returns current camera pose + FOV if available.""" - - if preview_frame_slider is None: - return - maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad( - preview_frame_slider.value / get_max_frame_index() - ) - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - render_tab_state.preview_fov = fov_rad - render_tab_state.preview_aspect = camera_path.get_aspect() - render_tab_state.preview_camera_type = camera_type.value - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - return pose, fov_rad, time - - def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]: - """Helper for creating the current frame # slider. This is removed and - re-added anytime the `max` value changes.""" - - with playback_folder: - preview_frame_slider = server.gui.add_slider( - "Preview frame", - min=0, - max=get_max_frame_index(), - step=1, - initial_value=0, - # Place right after the pause button. - order=preview_render_stop_button.order + 0.01, - disabled=get_max_frame_index() == 1, - ) - play_button.disabled = preview_frame_slider.disabled - preview_render_button.disabled = preview_frame_slider.disabled - - @preview_frame_slider.on_update - def _(_) -> None: - nonlocal preview_camera_handle - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - - preview_camera_handle = server.scene.add_camera_frustum( - "/preview_camera", - fov=fov_rad, - aspect=resolution.value[0] / resolution.value[1], - scale=0.35, - wxyz=pose.rotation().wxyz, - position=pose.translation(), - color=(10, 200, 30), - ) - if render_tab_state.preview_render: - for client in server.get_clients().values(): - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - return preview_frame_slider - - # We back up the camera poses before and after we start previewing renders. - camera_pose_backup_from_id: Dict[int, tuple] = {} - - @preview_render_button.on_click - def _(_) -> None: - render_tab_state.preview_render = True - preview_render_button.visible = False - preview_render_stop_button.visible = True - - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov, time = maybe_pose_and_fov_rad_and_time - del fov - - # Hide all scene nodes when we're previewing the render. - server.scene.set_global_visibility(True) - - # Back up and then set camera poses. - for client in server.get_clients().values(): - camera_pose_backup_from_id[client.client_id] = ( - client.camera.position, - client.camera.look_at, - client.camera.up_direction, - ) - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - @preview_render_stop_button.on_click - def _(_) -> None: - render_tab_state.preview_render = False - preview_render_button.visible = True - preview_render_stop_button.visible = False - - # Revert camera poses. 
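# Editorial note, not part of the original diff: in the preview-start handler
# above, the comment says "Hide all scene nodes" but the call is
# server.scene.set_global_visibility(True); hiding presumably requires
# passing False, with the stop handler's set_global_visibility(True) as the
# matching un-hide.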
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera() # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play).start() - - # Play the camera trajectory when the play button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # add button for loading existing path - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - camera_path_dir = datapath.parent - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # load the json file - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # apply the x rotation by 180 deg - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_Fov values will not be exactly matched. 
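# Not part of the original diff: the "x rotation by 180 deg" applied when
# loading keyframes below is the usual flip between y-up/z-back and
# y-down/z-forward camera conventions. As a matrix it is diag(1, -1, -1):
import numpy as np
import viser.transforms as tf

R_flip = tf.SO3.from_x_radians(np.pi).as_matrix()
assert np.allclose(R_flip, np.diag([1.0, -1.0, -1.0]), atol=1e-7)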
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/vis/utils.py b/som_out/bear/code/2024-10-25-235122/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, occ, radius=radius) 
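# Per-track colors above are sampled at evenly spaced points of the
# colormap; assuming matplotlib's colormaps API, the equivalent
# one-liner is colormaps.get_cmap(cmap)(np.linspace(0, 1, N))[..., :3].
# Points with a truthy occlusion flag are drawn as outlines rather than
# filled discs via the thickness switch in draw_keypoints_cv2.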
- out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
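    Depth is min-max normalized into [near_plane, far_plane] before the
    "turbo" colormap is applied; where `acc` is given, the result is
    alpha-blended toward white as img * acc + (1 - acc).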
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/bear/code/2024-10-25-235122/flow3d/vis/viewer.py b/som_out/bear/code/2024-10-25-235122/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/bear/code/2024-10-25-235122/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/bear/code/2024-10-25-235122/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/bear/code/2024-10-25-235122/scripts/evaluate_iphone.py b/som_out/bear/code/2024-10-25-235122/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235122/scripts/evaluate_iphone.py +++ /dev/null @@ 
-1,447 +0,0 @@ -import argparse -import json -import os.path as osp -from glob import glob -from itertools import product - -import cv2 -import imageio.v3 as iio -import numpy as np -import roma -import torch -from tqdm import tqdm - -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.metrics import mLPIPS, mPSNR, mSSIM -from flow3d.transforms import rt_to_mat4, solve_procrustes - -parser = argparse.ArgumentParser() -parser.add_argument( - "--data_dir", - type=str, - help="Path to the data directory that contains all the sequences.", -) -parser.add_argument( - "--result_dir", - type=str, - help="Path to the result directory that contains the results. " - "For batch evaluation, result_dir should contain subdirectories for each sequence (result_dir/seq_name/results). " - "For single-sequence evaluation, result_dir should contain results directly (result_dir/results).", -) -parser.add_argument( - "--seq_names", - type=str, - nargs="+", - default=[ - "apple", - "backpack", - "block", - "creeper", - "handwavy", - "haru-sit", - "mochi-high-five", - "paper-windmill", - "pillow", - "spin", - "sriracha-tree", - "teddy", - ], - help="Sequence names to evaluate.", -) -args = parser.parse_args() - - -def load_data_dict(data_dir, train_names, val_names): - val_imgs = np.array( - [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names] - ) - val_covisibles = np.array( - [ - iio.imread( - osp.join( - data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png" - ) - ) - for name in tqdm(val_names, desc="Loading val covisibles") - ] - ) - train_depths = np.array( - [ - np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0] - for name in train_names - ] - ) - train_Ks, train_w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [name + ".png" for name in train_names], - ) - train_Ks = train_Ks[:, :3, :3] - scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item() - train_c2ws = np.linalg.inv(train_w2cs) - train_c2ws[:, :3, -1] *= scale - train_w2cs = np.linalg.inv(train_c2ws) - keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json"))) - keypoints_2d = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints_2d.append(json.load(f)) - keypoints_2d = np.array(keypoints_2d) - keypoints_2d[..., :2] *= 2.0 - time_ids = np.array( - [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths] - ) - time_pairs = np.array(list(product(time_ids, repeat=2))) - index_pairs = np.array(list(product(range(len(time_ids)), repeat=2))) - keypoints_3d = [] - for i, kps_2d in zip(time_ids, keypoints_2d): - K = train_Ks[i] - w2c = train_w2cs[i] - depth = train_depths[i] - is_kp_visible = kps_2d[:, 2] == 1 - is_depth_valid = ( - cv2.remap( - (depth != 0).astype(np.float32), - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - == 1 - ) - kp_depths = cv2.remap( - depth, # type: ignore - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - kps_3d = ( - np.einsum( - "ij,pj->pi", - np.linalg.inv(K), - np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1), - ) - * kp_depths[:, None] - ) - kps_3d = np.einsum( - "ij,pj->pi", - np.linalg.inv(w2c)[:3], - np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1), - ) - kps_3d = np.concatenate( - [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1 - ) -
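# The lifting above is standard pinhole back-projection: a pixel (u, v)
# with depth d maps to the camera-space point d * K^-1 @ [u, v, 1]^T,
# which the inverse extrinsics inv(w2c) then carry into world space. The
# appended fourth channel marks keypoints that are both annotated
# visible and backed by valid depth; rows failing that check are zeroed
# out below.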
kps_3d[kps_3d[:, -1] != 1] = 0.0 - keypoints_3d.append(kps_3d) - keypoints_3d = np.array(keypoints_3d) - return { - "val_imgs": val_imgs, - "val_covisibles": val_covisibles, - "train_depths": train_depths, - "train_Ks": train_Ks, - "train_w2cs": train_w2cs, - "keypoints_2d": keypoints_2d, - "keypoints_3d": keypoints_3d, - "time_ids": time_ids, - "time_pairs": time_pairs, - "index_pairs": index_pairs, - } - - -def load_result_dict(result_dir, val_names): - try: - pred_val_imgs = np.array( - [ - iio.imread(osp.join(result_dir, "rgb", f"{name}.png")) - for name in val_names - ] - ) - except Exception: - pred_val_imgs = None - try: - keypoints_dict = np.load( - osp.join(result_dir, "keypoints.npz"), allow_pickle=True - ) - if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict: - keypoints_dict = keypoints_dict["arr_0"].item() - pred_keypoint_Ks = keypoints_dict["Ks"] - pred_keypoint_w2cs = keypoints_dict["w2cs"] - pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"] - pred_train_depths = keypoints_dict["pred_train_depths"] - except Exception: - print( - "No keypoints.npz found; make sure this is because the method itself cannot produce keypoints." - ) - keypoints_dict = {} - pred_keypoint_Ks = None - pred_keypoint_w2cs = None - pred_keypoints_3d = None - pred_train_depths = None - - if "visibilities" in keypoints_dict: - pred_visibilities = keypoints_dict["visibilities"] - else: - pred_visibilities = None - - return { - "pred_val_imgs": pred_val_imgs, - "pred_train_depths": pred_train_depths, - "pred_keypoint_Ks": pred_keypoint_Ks, - "pred_keypoint_w2cs": pred_keypoint_w2cs, - "pred_keypoints_3d": pred_keypoints_3d, - "pred_visibilities": pred_visibilities, - } - - -def evaluate_3d_tracking(data_dict, result_dict): - train_Ks = data_dict["train_Ks"] - train_w2cs = data_dict["train_w2cs"] - keypoints_3d = data_dict["keypoints_3d"] - time_ids = data_dict["time_ids"] - time_pairs = data_dict["time_pairs"] - index_pairs = data_dict["index_pairs"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"] - if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks): - print("Inconsistent camera intrinsics.") - print(train_Ks[time_ids][0], pred_keypoint_Ks[0]) - keypoint_w2cs = train_w2cs[time_ids] - q, t, s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0] - R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1)) - pred_keypoints_3d = np.einsum( - "ij,...j->...i", - rt_to_mat4(R, t, s).numpy().astype(np.float64), - np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1), - ) - pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:] - # Compute 3D tracking metrics.
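# EPE is the mean Euclidean error over covisible keypoints, averaged
# first within each frame pair and then across pairs; PCK@tau is the
# fraction of those errors below tau, evaluated at 0.1 and 0.05 (10 cm
# and 5 cm in the dataset's metric scale). This is why the predictions
# were first aligned to the ground-truth cameras with the similarity
# transform from solve_procrustes above.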
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
None].repeat(num_pts, axis=-1) - evaluation_points = one_hot_eye == 0 - for i in range(num_frames): - evaluation_points[i, :, ~visibilities[i]] = False - occ_acc = np.sum( - np.equal(pred_visibilities, target_visibilities) & evaluation_points - ) / np.sum(evaluation_points) - all_frac_within = [] - all_jaccard = [] - - for thresh in [4, 8, 16, 32, 64]: - within_dist = np.sum( - np.square(pred_points - target_points), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, target_visibilities) - count_correct = np.sum(is_correct & evaluation_points) - count_visible_points = np.sum(target_visibilities & evaluation_points) - frac_correct = count_correct / count_visible_points - all_frac_within.append(frac_correct) - - true_positives = np.sum(is_correct & pred_visibilities & evaluation_points) - gt_positives = np.sum(target_visibilities & evaluation_points) - false_positives = (~target_visibilities) & pred_visibilities - false_positives = false_positives | ((~within_dist) & pred_visibilities) - false_positives = np.sum(false_positives & evaluation_points) - jaccard = true_positives / (gt_positives + false_positives) - all_jaccard.append(jaccard) - AJ = np.mean(all_jaccard) - APCK = np.mean(all_frac_within) - - print(f"2D tracking AJ: {AJ:.4f}") - print(f"2D tracking avg PCK: {APCK:.4f}") - print(f"2D tracking occlusion accuracy: {occ_acc:.4f}") - print("-----------------------------") - return AJ, APCK, occ_acc - - -def evaluate_nv(data_dict, result_dict): - device = "cuda" - psnr_metric = mPSNR().to(device) - ssim_metric = mSSIM().to(device) - lpips_metric = mLPIPS().to(device) - - val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device) - val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device) - pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device) - - for i in range(len(val_imgs)): - val_img = val_imgs[i] / 255.0 - pred_val_img = pred_val_imgs[i] / 255.0 - val_covisible = val_covisibles[i] / 255.0 - psnr_metric.update(val_img, pred_val_img, val_covisible) - ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - mpsnr = psnr_metric.compute().item() - mssim = ssim_metric.compute().item() - mlpips = lpips_metric.compute().item() - print(f"NV mPSNR: {mpsnr:.4f}") - print(f"NV mSSIM: {mssim:.4f}") - print(f"NV mLPIPS: {mlpips:.4f}") - return mpsnr, mssim, mlpips - - -if __name__ == "__main__": - seq_names = args.seq_names - - epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], [] - AJ_all, APCK_all, occ_acc_all = [], [], [] - mpsnr_all, mssim_all, mlpips_all = [], [], [] - - for seq_name in seq_names: - print("=========================================") - print(f"Evaluating {seq_name}") - print("=========================================") - data_dir = osp.join(args.data_dir, seq_name) - if not osp.exists(data_dir): - data_dir = args.data_dir - if not osp.exists(data_dir): - raise ValueError(f"Data directory {data_dir} not found.") - result_dir = osp.join(args.result_dir, seq_name, "results/") - if not osp.exists(result_dir): - result_dir = osp.join(args.result_dir, "results/") - if not osp.exists(result_dir): - raise ValueError(f"Result directory {result_dir} not found.") - - with open(osp.join(data_dir, "splits/train.json")) as f: - train_names = json.load(f)["frame_names"] - with open(osp.join(data_dir, "splits/val.json")) as f: - val_names = json.load(f)["frame_names"] - - data_dict = load_data_dict(data_dir, train_names, 
val_names) - result_dict = load_result_dict(result_dir, val_names) - if result_dict["pred_keypoints_3d"] is not None: - epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict) - AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict) - epe_all.append(epe) - pck_3d_10cm_all.append(pck_3d_10cm) - pck_3d_5cm_all.append(pck_3d_5cm) - AJ_all.append(AJ) - APCK_all.append(APCK) - occ_acc_all.append(occ_acc) - if len(data_dict["val_imgs"]) > 0: - if result_dict["pred_val_imgs"] is None: - print("No NV results found.") - continue - mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict) - mpsnr_all.append(mpsnr) - mssim_all.append(mssim) - mlpips_all.append(mlpips) - - print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}") - print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}") - print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}") - print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}") - print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}") - print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}") - print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}") - print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}") - print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}") diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/__init__.py b/som_out/bear/code/2024-10-25-235708/flow3d/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/configs.cpython-310.pyc deleted file mode 100644 index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/init_utils.cpython-310.pyc deleted file mode 100644 index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/loss_utils.cpython-310.pyc deleted file mode 100644 index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/metrics.cpython-310.pyc deleted file mode 100644 index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ diff --git 
a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/params.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/configs.py b/som_out/bear/code/2024-10-25-235708/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig - motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float = 
0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/data/__init__.py b/som_out/bear/code/2024-10-25-235708/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 index 375f888d320380a5c331b6d14fff062e4925c21b..0000000000000000000000000000000000000000 Binary files 
a/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/colmap.cpython-310.pyc deleted file mode 100644 index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc deleted file mode 100644 index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/data/base_dataset.py b/som_out/bear/code/2024-10-25-235708/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... - - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ... 
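# train_collate_fn below mixes two collation strategies: fixed-shape
# entries go through torch's default_collate, which stacks them into
# batched tensors, while the track-related keys stay plain Python lists
# because each sample can carry a different number of 2D tracks. A
# minimal sketch of the same idea, with hypothetical keys:
#   batch = [{"img": torch.zeros(3, 4, 4), "tracks": torch.zeros(7, 2)},
#            {"img": torch.ones(3, 4, 4), "tracks": torch.zeros(5, 2)}]
#   collated = {"img": default_collate([b["img"] for b in batch]),  # (2, 3, 4, 4)
#               "tracks": [b["tracks"] for b in batch]}             # ragged list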
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/data/casual_dataset.py b/som_out/bear/code/2024-10-25-235708/flow3d/data/casual_dataset.py deleted file mode 100644 index db367682ee99a59fd914240269d8f8e13e00bd1f..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = depth_type - self.num_targets_per_frame = 
num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return self.depths[index] / self.scale - - def load_image(self, index) -> 
torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, dtype=torch.float32), - torch.arange(0, H, dtype=torch.float32), - indexing="xy", 
- ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/data/colmap.py b/som_out/bear/code/2024-10-25-235708/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = {v.name: k for k, v in 
images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/data/iphone_dataset.py b/som_out/bear/code/2024-10-25-235708/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.scene_norm_dict = 
scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
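# For the non-lidar depth types, the .npy files hold inverse depth
# (disparity); the nested loader below clamps small values and inverts.
# A minimal standalone sketch, with a hypothetical path:
#   disp = np.load("frame_00000.npy")
#   disp[disp < 1e-3] = 1e-3
#   depth = 1.0 / disp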
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
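# Scene normalization, as computed below: center the foreground tracks on
# their mean, take half of the largest 5th-95th percentile extent as the
# scale, and rotate so that the mean camera up axis (the negated second
# row of the w2c rotations) aligns with world +z.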
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
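# Sample budgeting: each of the F subsampled query frames contributes
# floor(num_samples / F) tracks, and the last frame absorbs the remainder
# so the total is exact; e.g. num_samples=100 over F=3 frames yields
# 33 + 33 + 34 = 100.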
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
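# Color lookup uses F.grid_sample, which expects coordinates in [-1, 1]:
# normalize_coords maps a pixel (x, y) to (2x / (W - 1) - 1, 2y / (H - 1) - 1).
# With the query image permuted to (1, 3, H, W) and the P query points
# arranged on a (1, 1, P, 2) grid, grid_sample returns (1, 3, 1, P), which
# is squeezed and transposed to per-track colors of shape (P, 3).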
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
- self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/data/utils.py b/som_out/bear/code/2024-10-25-235708/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. - - Args: - occlusions: [-inf, inf], np.float32 - expected_dist:, [-inf, inf], np.float32 - - Returns: - visibles: bool - """ - - def sigmoid(x): - if x.dtype == np.ndarray: - return 1 / (1 + np.exp(-x)) - else: - return torch.sigmoid(x) - - visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5 - return visibles - - -def parse_tapir_track_info(occlusions, expected_dist): - """ - return: - valid_visible: mask of visible & confident points - valid_invisible: mask of invisible & confident points - confidence: clamped confidence scores (all < 0.5 -> 0) - """ - visiblility = 1 - F.sigmoid(occlusions) - confidence = 1 - F.sigmoid(expected_dist) - valid_visible = visiblility * confidence > 0.5 - valid_invisible = (1 - visiblility) * confidence > 0.5 - # set all confidence < 0.5 to 0 - confidence = confidence * (valid_visible | valid_invisible).float() - return valid_visible, valid_invisible, confidence - - -def get_tracks_3d_for_query_frame( - query_index: int, - query_img: torch.Tensor, - tracks_2d: torch.Tensor, - depths: torch.Tensor, - masks: torch.Tensor, - inv_Ks: torch.Tensor, - c2ws: torch.Tensor, -): - """ - :param query_index (int) - :param query_img [H, W, 3] - :param tracks_2d [N, T, 4] - :param depths [T, H, W] - :param masks [T, H, W] - :param inv_Ks [T, 3, 3] - :param c2ws [T, 4, 4] - returns ( - tracks_3d [N, T, 3] - track_colors [N, 3] - visibles [N, T] - invisibles [N, T] - confidences [N, T] - ) - """ - T, H, W = depths.shape - query_img = query_img[None].permute(0, 3, 1, 2) # (1, 3, H, W) - tracks_2d = tracks_2d.swapaxes(0, 1) # (T, N, 4) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - # (T, N), (T, N), (T, N) - visibles, 
invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - track_depths = F.grid_sample( - depths[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths[..., None] - ) - tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[ - ..., :3 - ] - # Filter out out-of-mask tracks. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - is_in_masks = ( - F.grid_sample( - masks[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - )[:, 0, 0] - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - - # valid if in the fg mask at least 40% of the time - # in_mask_counts = is_in_masks.sum(0) - # t = 0.25 - # thresh = min(t * T, in_mask_counts.float().quantile(t).item()) - # valid = in_mask_counts > thresh - valid = is_in_masks[query_index] - # valid if visible 5% of the time - visible_counts = visibles.sum(0) - valid = valid & ( - visible_counts - >= min( - int(0.05 * T), - visible_counts.float().quantile(0.1).item(), - ) - ) - - # Get track's color from the query frame. - # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. 
Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. - If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. 
-def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] - ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/init_utils.py b/som_out/bear/code/2024-10-25-235708/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from 
flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes -from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - -def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the canonical frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. - opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - -def init_bg( - points: StaticObservations, -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkdg_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors for each point and use that as the scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkdg_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals.
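# The block below orients each background Gaussian so that its local +z axis
# points along the estimated surface normal: the rotation axis is
# normalize(z x n), the angle is acos(z . n), and that axis-angle vector is
# converted to a unit quaternion. roma.rotvec_to_unitquat returns xyzw order,
# so .roll(1, dims=-1) shifts it to the wxyz convention used elsewhere in
# this codebase.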
- local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t - 
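# Weighting for the per-frame Procrustes solve below: a track contributes
# only if it is reliable in both the canonical frame cano_t and the current
# frame cur_t, so the two spatial weights are multiplied and the two
# confidences averaged. Frames whose total weight falls below
# min_mean_weight * num_frames are skipped and simply inherit the previous
# frame's rotation and translation.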
procrustes_weights = ( - weights[cano_t] - * weights[cur_t] - * (confidences[cano_t] + confidences[cur_t]) - / 2 - ) - if procrustes_weights.sum() < min_mean_weight * num_frames: - init_rots[n, cur_t] = init_rots[n, prev_t] - init_ts[n, cur_t] = init_ts[n, prev_t] - cluster_skip_ts.append(cur_t) - else: - se3, (err, err_before) = solve_procrustes( - cluster[cano_t], - cluster[cur_t], - weights=procrustes_weights, - enforce_se3=True, - rot_type=rot_type, - ) - init_rot, init_t, _ = se3 - assert init_rot.shape[-1] == rot_dim - # double cover - if rot_type == "quat" and torch.linalg.norm( - init_rot - init_rots[n][prev_t] - ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]): - init_rot = -init_rot - init_rots[n, cur_t] = init_rot - init_ts[n, cur_t] = init_t - if np.isnan(err): - print(f"{cur_t=} {err=}") - print(f"{procrustes_weights.isnan().sum()=}") - if np.isnan(err_before): - print(f"{cur_t=} {err_before=}") - print(f"{procrustes_weights.isnan().sum()=}") - errs_after[n, cur_t] = err - errs_before[n, cur_t] = err_before - prev_t = cur_t - skipped_ts[cluster_id.item()] = cluster_skip_ts - - guru.info(f"{skipped_ts=}") - guru.info( - "procrustes init median error: {:.5f} => {:.5f}".format( - np.median(errs_before[errs_before > 0]), - np.median(errs_after[errs_after > 0]), - ) - ) - guru.info( - "procrustes init mean error: {:.5f} => {:.5f}".format( - np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0]) - ) - ) - guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}") - - if vis: - server = get_server(port) - center_idcs = torch.argmin(dists2centers, dim=0) - print(f"{dists2centers.shape=} {center_idcs.shape=}") - vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs]) - vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks") - import ipdb - - ipdb.set_trace() - - bases = MotionBases(init_rots, init_ts) - return bases, motion_coefs, tracks_3d - - -def run_initial_optim( - fg: GaussianParams, - bases: MotionBases, - tracks_3d: TrackObservations, - Ks: torch.Tensor, - w2cs: torch.Tensor, - num_iters: int = 1000, - use_depth_range_loss: bool = False, -): - """ - :param fg: Gaussians with means [num_gaussians, 3] and motion_coefs [num_gaussians, num_bases] - :param bases: motion bases with rots [num_bases, num_frames, 4|6] and transls [num_bases, num_frames, 3] - :param tracks_3d: 3D track observations used as supervision - """ - optimizer = torch.optim.Adam( - [ - {"params": bases.params["rots"], "lr": 1e-2}, - {"params": bases.params["transls"], "lr": 3e-2}, - {"params": fg.params["motion_coefs"], "lr": 1e-2}, - {"params": fg.params["means"], "lr": 1e-3}, - ], - ) - scheduler = torch.optim.lr_scheduler.ExponentialLR( - optimizer, gamma=0.1 ** (1 / num_iters) - ) - G = fg.params["means"].shape[0] - num_frames = bases.num_frames - device = bases.params["rots"].device - - w_smooth_func = lambda i, min_v, max_v, th: ( - min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v - ) - - gt_2d, gt_depth = project_2d_tracks( - tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - # (G, T, 2) - gt_2d = gt_2d.swapaxes(0, 1) - # (G, T) - gt_depth = gt_depth.swapaxes(0, 1) - - ts = torch.arange(0, num_frames, device=device) - ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1)) # (3B,) - - pbar = tqdm(range(0, num_iters)) - for i in pbar: - coefs = fg.get_coefs() - transfms = bases.compute_transforms(ts, coefs) - positions = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0,
1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. - w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = 
init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / 
(cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/loss_utils.py b/som_out/bear/code/2024-10-25-235708/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return 
torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - 
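# The loss below penalizes the component of frame-to-frame motion along the
# viewing ray: both displacements (t0 -> t1 and t1 -> t2) are projected onto
# ray_dir and their squared projections averaged, discouraging depth
# (z-direction) jitter while leaving motion parallel to the image plane
# unpenalized. The commented-out variant kept below used a single
# second-order acceleration term instead.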
# acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/metrics.py b/som_out/bear/code/2024-10-25-235708/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/metrics.py +++ /dev/null @@ -1,313 +0,0 @@ -from typing import Literal - -import numpy as np -import torch -import torch.nn.functional as F -from torchmetrics.functional.image.lpips import _NoTrainLpips -from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure -from torchmetrics.metric import Metric -from torchmetrics.utilities import dim_zero_cat -from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE - - -def compute_psnr( - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, -) -> float: - """ - Args: - preds (torch.Tensor): (..., 3) predicted images in [0, 1]. - targets (torch.Tensor): (..., 3) target images in [0, 1]. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - - Returns: - psnr (float): Peak signal-to-noise ratio. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - return ( - -10.0 - * torch.log( - F.mse_loss( - preds * masks[..., None], - targets * masks[..., None], - reduction="sum", - ) - / masks.sum().clamp(min=1.0) - / 3.0 - ) - / np.log(10.0) - ).item() - - -def compute_pose_errors( - preds: torch.Tensor, targets: torch.Tensor -) -> tuple[float, float, float]: - """ - Args: - preds: (N, 4, 4) predicted camera poses. - targets: (N, 4, 4) target camera poses. - - Returns: - ate (float): Absolute trajectory error. - rpe_t (float): Relative pose error in translation. - rpe_r (float): Relative pose error in rotation (degree). - """ - # Compute ATE. - ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item() - # Compute RPE_t and RPE_r. - # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r. - # torch has numerical issues for acos when the value is close to 1.0, i.e. - # RPE_r is supposed to be very small, and will result in artificially large - # error. 
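# An illustrative magnitude (an assumed example, not from the original code):
# near cos(theta) = 1, a perturbation eps of the cosine maps to an angle of
# about sqrt(2 * eps), so at float32 resolution (eps ~ 1.2e-7) arccos cannot
# resolve rotations below roughly 5e-4 rad (~0.03 deg), which inflates RPE_r
# whenever the true relative rotations are near zero.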
- preds = preds.detach().cpu().numpy() - targets = targets.detach().cpu().numpy() - pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:] - target_rels = np.linalg.inv(targets[:-1]) @ targets[1:] - error_rels = np.linalg.inv(target_rels) @ pred_rels - traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1) - rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item() - rpe_r = ( - np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item() - / np.pi - * 180.0 - ) - return ate, rpe_t, rpe_r - - -class mPSNR(PeakSignalNoiseRatio): - sum_squared_error: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__(self, **kwargs) -> None: - super().__init__( - data_range=1.0, - base=10.0, - dim=None, - reduction="elementwise_mean", - **kwargs, - ) - self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (..., 3) float32 predicted images. - targets (torch.Tensor): (..., 3) float32 target images. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - self.sum_squared_error.append( - torch.sum(torch.pow((preds - targets) * masks[..., None], 2)) - ) - self.total.append(masks.sum().to(torch.int64) * 3) - - def compute(self) -> torch.Tensor: - """Compute peak signal-to-noise ratio over state.""" - sum_squared_error = dim_zero_cat(self.sum_squared_error) - total = dim_zero_cat(self.total) - return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0) - - -class mSSIM(StructuralSimilarityIndexMeasure): - similarity: list - - def __init__(self, **kwargs) -> None: - super().__init__( - reduction=None, - data_range=1.0, - return_full_image=False, - **kwargs, - ) - assert isinstance(self.sigma, float) - - def __len__(self) -> int: - return sum([s.shape[0] for s in self.similarity]) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional binary masks where - the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - - # Construct a 1D Gaussian blur filter. - assert isinstance(self.kernel_size, int) - hw = self.kernel_size // 2 - shift = (2 * hw - self.kernel_size + 1) / 2 - assert isinstance(self.sigma, float) - f_i = ( - (torch.arange(self.kernel_size, device=preds.device) - hw + shift) - / self.sigma - ) ** 2 - filt = torch.exp(-0.5 * f_i) - filt /= torch.sum(filt) - - # Blur in x and y (faster than the 2D convolution). - def convolve2d(z, m, f): - # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf).
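# Masked separable blur: the masked image z * m and the mask m are filtered
# with the same window, and the result is renormalized by the blurred mask
# (the m_ != 0 branch below), i.e. each output pixel is scaled up by the
# inverse fraction of valid inputs in its window so that masked-out pixels
# do not drag the local statistics toward zero.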
- z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. - # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. 
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. - """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/params.py b/som_out/bear/code/2024-10-25-235708/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if 
f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] 
== 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/renderer.py b/som_out/bear/code/2024-10-25-235708/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. - work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/scene_model.py b/som_out/bear/code/2024-10-25-235708/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def 
__init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = 
GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). - target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = 
colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. - self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/tensor_dataclass.py b/som_out/bear/code/2024-10-25-235708/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
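# Dict branch of the recursive map: rebuild the dict by mapping each value,
# so tensors are transformed by fn, nested TensorDataclasses are
# reconstructed from their mapped fields, lists and tuples are rebuilt
# element-wise, and any other value falls through unchanged.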
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/trainer.py b/som_out/bear/code/2024-10-25-235708/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
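
# The 2D track supervision below projects camera-space points with the usual
# pinhole convention: multiply by K, clamp depth away from zero, then divide
# out depth. A minimal self-contained sketch of that convention (hypothetical
# intrinsics and points, not part of the trainer):
def _pinhole_projection_sketch():
    import torch

    K = torch.tensor(
        [[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]]
    )  # hypothetical intrinsics
    xyz_cam = torch.tensor([[0.1, -0.2, 2.0]])  # (P, 3) camera-space points
    uvw = torch.einsum("ij,pj->pi", K, xyz_cam)  # (P, 3) homogeneous pixels
    depth = torch.clamp(uvw[..., 2:], min=1e-6)  # guard against z ~ 0
    return uvw[..., :2] / depth  # (P, 2) pixel coordinates
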
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
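
# The track weights just below combine per-point confidence with an
# exponential falloff in frame distance, so far-apart frame pairs supervise
# less strongly. A small sketch of just that weighting term (hypothetical
# numbers):
def _track_weight_sketch():
    import torch

    num_frames = 80  # hypothetical sequence length
    intervals = torch.tensor([1.0, 10.0, 40.0])  # |t - target_t| per pair
    confidences = torch.tensor([0.9, 0.9, 0.9])  # tracker confidences
    w_interval = torch.exp(-2 * intervals / num_frames)  # decays with distance
    return confidences * w_interval  # final per-pair loss weights
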
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
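
# Both smoothness terms above are discrete acceleration penalties: for a
# trajectory x_t they penalize ||2 * x_t - x_(t-1) - x_(t+1)||, the second
# difference in time. A standalone sketch on a toy trajectory:
def _accel_penalty_sketch():
    import torch

    x = torch.randn(100, 3)  # toy per-frame positions, (T, 3)
    accel = 2 * x[1:-1] - x[:-2] - x[2:]  # second difference, (T - 2, 3)
    return accel.norm(dim=-1).mean()  # scalar smoothness loss
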
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
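
# run_control_steps above interleaves densify / cull / opacity-reset on one
# modular schedule. A simplified sketch of that gating arithmetic with
# hypothetical settings (the real values come from OptimizerConfig, and the
# real method additionally caps densification at stop_densify_steps):
def _control_gates_sketch(step: int, num_frames: int = 80):
    warmup, control_every, stop_control = 500, 50, 5000  # hypothetical
    reset_every = 20 * control_every  # reset_opacity_every_n_controls = 20
    if not (warmup < step < stop_control and step % control_every == 0):
        return None  # no control at this step
    return {
        "densify": step % reset_every > num_frames,
        "cull": step % reset_every > min(3 * num_frames, 1000),
        "reset_opacity": step % reset_every == 0,
    }
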
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/trajectories.py b/som_out/bear/code/2024-10-25-235708/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1) - y = torch.cross(z, x, dim=-1) # (3) - avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center) - avg_w2c = torch.linalg.inv(avg_c2w) - return avg_w2c - - -def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor: - """Triangulate a set of rays to find a single lookat point. - - Args: - origins (torch.Tensor): A (N, 3) array of ray origins. - viewdirs (torch.Tensor): A (N, 3) array of ray view directions. - - Returns: - torch.Tensor: A (3,) lookat point. - """ - - viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1) - eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None] - # Calculate projection matrix I - rr^T - I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :]) - # Compute sum of projections - sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3) - # Solve for the intersection point using least squares - lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0] - # Check NaNs. - assert not torch.any(torch.isnan(lookat)) - return lookat - - -def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor): - """ - Args: - positions: (N, 3) tensor of camera positions - lookat: (3,) tensor of lookat point - up: (3,) tensor of up vector - - Returns: - w2cs: (N, 3, 3) tensor of world to camera rotation matrices - """ - forward_vectors = F.normalize(lookat - positions, dim=-1) - right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1) - down_vectors = F.normalize( - torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1 - ) - Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1) - w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions)) - return w2cs - - -def get_arc_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_position = torch.linalg.inv(ref_w2c)[:3, 3] - thetas = ( - torch.sin( - torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[ - :-1 - ] - ) - * (degree / 2.0) - / 180.0 - * torch.pi - ) - positions = torch.einsum( - "nij,j->ni", - roma.rotvec_to_rotmat(thetas[:, None] * up[None]), - ref_position - lookat, - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_lemniscate_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi) - # Lemniscate curve in camera space. Starting at the origin. - thetas = ( - torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1] - + torch.pi / 2 - ) - positions = torch.stack( - [ - a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2), - a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2), - torch.zeros(num_frames, device=ref_w2c.device), - ], - dim=-1, - ) - # Transform to world space. 
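
# get_lookat above triangulates the point closest to all rays by solving the
# normal equations of sum_i ||(I - d_i d_i^T)(p - o_i)||^2 in the
# least-squares sense. A quick numeric check that it recovers a known target,
# using the function defined above:
def _lookat_check():
    import torch

    target = torch.tensor([1.0, 2.0, 3.0])
    origins = torch.randn(8, 3) * 5.0  # arbitrary camera positions
    viewdirs = target[None] - origins  # rays aimed exactly at the target
    return get_lookat(origins, viewdirs)  # ~= target up to numerics
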
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/transforms.py b/som_out/bear/code/2024-10-25-235708/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transfm to be SE3. - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - q (torch.Tensor): (4,), rotation component in quaternion of WXYZ - format. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (torch.Tensor): (), average L2 distance after alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error. 
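
# The continuous 6D rotation parameterization above keeps the first two
# matrix columns and re-orthonormalizes via Gram-Schmidt, so every 6-vector
# maps to a valid rotation (the continuity argument is due to Zhou et al.,
# CVPR 2019). A round-trip sanity check using the two helpers defined above:
def _cont_6d_roundtrip_check():
    import torch

    R = torch.linalg.qr(torch.randn(3, 3)).Q  # random orthonormal matrix
    if torch.det(R) < 0:
        R = -R  # force det(R) = +1 so R is a rotation
    R_rec = cont_6d_to_rmat(rmat_to_cont_6d(R))
    return torch.allclose(R, R_rec, atol=1e-5)  # True
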
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/validator.py b/som_out/bear/code/2024-10-25-235708/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
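
# mPSNR / mSSIM / mLPIPS live in flow3d.metrics (not shown in this file) and
# accumulate masked image metrics across batches. For intuition only, here is
# a generic masked PSNR over valid pixels -- an illustrative stand-in, not
# the actual metric implementation:
def _masked_psnr_sketch(pred, gt, mask):
    import torch

    # pred, gt: (H, W, 3) in [0, 1]; mask: (H, W) with 1 = valid pixel.
    mse = ((pred - gt) ** 2).mean(dim=-1)
    mse = (mse * mask).sum() / mask.sum().clamp(min=1)
    return -10.0 * torch.log10(mse.clamp(min=1e-10))
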
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
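
# The keypoint readout above samples dense prediction maps at sub-pixel
# locations with F.grid_sample, which expects coordinates in [-1, 1].
# normalize_coords comes from flow3d.data.utils (not shown here); assuming it
# follows the standard align_corners=True convention, it would look like:
def _normalize_coords_sketch(xy, h, w):
    import torch

    # xy: (..., 2) pixel coords; maps x in [0, W-1], y in [0, H-1] to [-1, 1].
    scale = torch.tensor([w - 1.0, h - 1.0])
    return 2.0 * xy / scale - 1.0
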
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
- t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (3, 3). - img = batch["imgs"][0] - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "motion_coefs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/vis/__init__.py b/som_out/bear/code/2024-10-25-235708/flow3d/vis/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/vis/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc deleted file mode 100644 index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/vis/__pycache__/render_panel.cpython-310.pyc deleted file mode 100644 index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/vis/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/bear/code/2024-10-25-235708/flow3d/vis/__pycache__/viewer.cpython-310.pyc deleted file mode 100644 index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-25-235708/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/vis/playback_panel.py b/som_out/bear/code/2024-10-25-235708/flow3d/vis/playback_panel.py deleted file mode 100644 index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/vis/playback_panel.py +++ /dev/null @@ -1,68 +0,0 @@ -import threading -import time - -import viser - - -def add_gui_playback_group( - server: viser.ViserServer, - num_frames: int, - min_fps: float = 1.0, - max_fps: float = 60.0, - fps_step: float = 0.1, - initial_fps: float = 10.0, -): - 
gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/vis/render_panel.py b/som_out/bear/code/2024-10-25-235708/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
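
# The playback group in the previous file animates by letting a daemon thread
# step the timestep slider and sleep 1 / fps between updates. The same
# pattern with the viser widgets stripped out (framework-free sketch; runs
# until interrupted):
def _playback_loop_sketch(num_frames: int = 100, fps: float = 10.0):
    import itertools
    import time

    for t in itertools.cycle(range(num_frames)):  # wrap around forever
        print(f"frame {t}")  # stand-in for updating the slider / renderer
        time.sleep(1.0 / fps)
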
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
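Orientation and position above are interpolated with Kochanek-Bartels splines, with only the tension term of `tcb` exposed in the UI. A minimal sketch of the position spline in isolation, using the same `splines` package (toy keyframe positions, not from the file):

import numpy as np
import splines

positions = [np.zeros(3), np.array([1.0, 0.0, 0.0]), np.array([1.0, 1.0, 0.0])]
position_spline = splines.KochanekBartels(
    positions, tcb=(0.0, 0.0, 0.0), endconditions="natural"
)
# Spline-t is keyframe-indexed: t=1.0 evaluates exactly at the second keyframe.
assert np.allclose(position_spline.evaluate(1.0), [1.0, 0.0, 0.0])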
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
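viser reports camera orientation as a wxyz quaternion, which is the convention `tf.SO3` expects, so the pose assembled above converts directly to a 4x4 matrix. A quick standalone check with an identity camera (toy values; assumes `tf` is `viser.transforms`):

import numpy as np
import viser.transforms as tf

pose = tf.SE3.from_rotation_and_translation(
    tf.SO3(np.array([1.0, 0.0, 0.0, 0.0])),  # wxyz identity quaternion
    np.zeros(3),
)
assert np.allclose(pose.as_matrix(), np.eye(4))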
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
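`_make_transform_controls_callback` exists so that each keyframe's controls capture their own keyframe; registering the handler directly inside the loop below would hit Python's late-binding closure behavior. A standalone illustration of the pitfall and the fix:

# All three lambdas share the loop variable and see only its final value.
late_bound = [lambda: i for i in range(3)]
assert [f() for f in late_bound] == [2, 2, 2]

# Routing the value through a helper parameter freezes it per iteration,
# which is what the helper above does for each (keyframe, controls) pair.
def make_callback(j):
    return lambda: j

bound = [make_callback(i) for i in range(3)]
assert [f() for f in bound] == [0, 1, 2]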
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
-        Returns current camera pose + FOV if available."""
-
-        if preview_frame_slider is None:
-            return
-        maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad(
-            preview_frame_slider.value / get_max_frame_index()
-        )
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-        render_tab_state.preview_fov = fov_rad
-        render_tab_state.preview_aspect = camera_path.get_aspect()
-        render_tab_state.preview_camera_type = camera_type.value
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-        return pose, fov_rad, time
-
-    def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]:
-        """Helper for creating the current frame # slider. This is removed and
-        re-added anytime the `max` value changes."""
-
-        with playback_folder:
-            preview_frame_slider = server.gui.add_slider(
-                "Preview frame",
-                min=0,
-                max=get_max_frame_index(),
-                step=1,
-                initial_value=0,
-                # Place right after the pause button.
-                order=preview_render_stop_button.order + 0.01,
-                disabled=get_max_frame_index() == 1,
-            )
-            play_button.disabled = preview_frame_slider.disabled
-            preview_render_button.disabled = preview_frame_slider.disabled
-
-        @preview_frame_slider.on_update
-        def _(_) -> None:
-            nonlocal preview_camera_handle
-            maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-            if maybe_pose_and_fov_rad_and_time is None:
-                return
-            pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-
-            preview_camera_handle = server.scene.add_camera_frustum(
-                "/preview_camera",
-                fov=fov_rad,
-                aspect=resolution.value[0] / resolution.value[1],
-                scale=0.35,
-                wxyz=pose.rotation().wxyz,
-                position=pose.translation(),
-                color=(10, 200, 30),
-            )
-            if render_tab_state.preview_render:
-                for client in server.get_clients().values():
-                    client.camera.wxyz = pose.rotation().wxyz
-                    client.camera.position = pose.translation()
-                if gui_timestep_handle is not None:
-                    gui_timestep_handle.value = int(time)
-
-        return preview_frame_slider
-
-    # Back up the camera poses when entering the render preview; restored on exit.
-    camera_pose_backup_from_id: Dict[int, tuple] = {}
-
-    @preview_render_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = True
-        preview_render_button.visible = False
-        preview_render_stop_button.visible = True
-
-        maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov, time = maybe_pose_and_fov_rad_and_time
-        del fov
-
-        # Hide all scene nodes when we're previewing the render.
-        server.scene.set_global_visibility(False)
-
-        # Back up and then set camera poses.
-        for client in server.get_clients().values():
-            camera_pose_backup_from_id[client.client_id] = (
-                client.camera.position,
-                client.camera.look_at,
-                client.camera.up_direction,
-            )
-            client.camera.wxyz = pose.rotation().wxyz
-            client.camera.position = pose.translation()
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-
-    @preview_render_stop_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = False
-        preview_render_button.visible = True
-        preview_render_stop_button.visible = False
-
-        # Revert camera poses.
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera() # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play).start() - - # Play the camera trajectory when the play button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # add button for loading existing path - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - camera_path_dir = datapath.parent - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # load the json file - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # apply the x rotation by 180 deg - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_Fov values will not be exactly matched. 
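A quick numeric illustration of the tolerance used just below: a FOV that round-trips through degrees and radians is recovered only up to floating point error, so override detection compares with a 1e-3 slack instead of exact equality (toy value):

import numpy as np

fov_deg = 75.0
fov_rad = fov_deg / 180.0 * np.pi  # conversion applied when saving
# Not guaranteed to be bitwise equal after the round trip:
assert abs(np.rad2deg(fov_rad) - fov_deg) < 1e-3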
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/vis/utils.py b/som_out/bear/code/2024-10-25-235708/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, occ, radius=radius) 
- out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/bear/code/2024-10-25-235708/flow3d/vis/viewer.py b/som_out/bear/code/2024-10-25-235708/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/bear/code/2024-10-25-235708/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/bear/code/2024-10-25-235708/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/bear/code/2024-10-25-235708/scripts/evaluate_iphone.py b/som_out/bear/code/2024-10-25-235708/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-25-235708/scripts/evaluate_iphone.py +++ /dev/null @@ 
-1,447 +0,0 @@ -import argparse -import json -import os.path as osp -from glob import glob -from itertools import product - -import cv2 -import imageio.v3 as iio -import numpy as np -import roma -import torch -from tqdm import tqdm - -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.metrics import mLPIPS, mPSNR, mSSIM -from flow3d.transforms import rt_to_mat4, solve_procrustes - -parser = argparse.ArgumentParser() -parser.add_argument( - "--data_dir", - type=str, - help="Path to the data directory that contains all the sequences.", -) -parser.add_argument( - "--result_dir", - type=str, - help="Path to the result directory that contains the results." - "for batch evaluation, result_dir should contain subdirectories for each sequence. (result_dir/seq_name/results)" - "for single sequence evaluation, result_dir should contain results directly (result_dir/results)", -) -parser.add_argument( - "--seq_names", - type=str, - nargs="+", - default=[ - "apple", - "backpack", - "block", - "creeper", - "handwavy", - "haru-sit", - "mochi-high-five", - "paper-windmill", - "pillow", - "spin", - "sriracha-tree", - "teddy", - ], - help="Sequence names to evaluate.", -) -args = parser.parse_args() - - -def load_data_dict(data_dir, train_names, val_names): - val_imgs = np.array( - [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names] - ) - val_covisibles = np.array( - [ - iio.imread( - osp.join( - data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png" - ) - ) - for name in tqdm(val_names, desc="Loading val covisibles") - ] - ) - train_depths = np.array( - [ - np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0] - for name in train_names - ] - ) - train_Ks, train_w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [name + ".png" for name in train_names], - ) - train_Ks = train_Ks[:, :3, :3] - scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item() - train_c2ws = np.linalg.inv(train_w2cs) - train_c2ws[:, :3, -1] *= scale - train_w2cs = np.linalg.inv(train_c2ws) - keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json"))) - keypoints_2d = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints_2d.append(json.load(f)) - keypoints_2d = np.array(keypoints_2d) - keypoints_2d[..., :2] *= 2.0 - time_ids = np.array( - [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths] - ) - time_pairs = np.array(list(product(time_ids, repeat=2))) - index_pairs = np.array(list(product(range(len(time_ids)), repeat=2))) - keypoints_3d = [] - for i, kps_2d in zip(time_ids, keypoints_2d): - K = train_Ks[i] - w2c = train_w2cs[i] - depth = train_depths[i] - is_kp_visible = kps_2d[:, 2] == 1 - is_depth_valid = ( - cv2.remap( - (depth != 0).astype(np.float32), - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - == 1 - ) - kp_depths = cv2.remap( - depth, # type: ignore - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - kps_3d = ( - np.einsum( - "ij,pj->pi", - np.linalg.inv(K), - np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1), - ) - * kp_depths[:, None] - ) - kps_3d = np.einsum( - "ij,pj->pi", - np.linalg.inv(w2c)[:3], - np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1), - ) - kps_3d = np.concatenate( - [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1 - ) - 
        kps_3d[kps_3d[:, -1] != 1] = 0.0
-        keypoints_3d.append(kps_3d)
-    keypoints_3d = np.array(keypoints_3d)
-    return {
-        "val_imgs": val_imgs,
-        "val_covisibles": val_covisibles,
-        "train_depths": train_depths,
-        "train_Ks": train_Ks,
-        "train_w2cs": train_w2cs,
-        "keypoints_2d": keypoints_2d,
-        "keypoints_3d": keypoints_3d,
-        "time_ids": time_ids,
-        "time_pairs": time_pairs,
-        "index_pairs": index_pairs,
-    }
-
-
-def load_result_dict(result_dir, val_names):
-    try:
-        pred_val_imgs = np.array(
-            [
-                iio.imread(osp.join(result_dir, "rgb", f"{name}.png"))
-                for name in val_names
-            ]
-        )
-    except Exception:
-        pred_val_imgs = None
-    try:
-        keypoints_dict = np.load(
-            osp.join(result_dir, "keypoints.npz"), allow_pickle=True
-        )
-        if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict:
-            keypoints_dict = keypoints_dict["arr_0"].item()
-        pred_keypoint_Ks = keypoints_dict["Ks"]
-        pred_keypoint_w2cs = keypoints_dict["w2cs"]
-        pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"]
-        pred_train_depths = keypoints_dict["pred_train_depths"]
-    except Exception:
-        print(
-            "No keypoints.npz found; make sure this is because the method itself cannot produce keypoints."
-        )
-        keypoints_dict = {}
-        pred_keypoint_Ks = None
-        pred_keypoint_w2cs = None
-        pred_keypoints_3d = None
-        pred_train_depths = None
-
-    if "visibilities" in list(keypoints_dict.keys()):
-        pred_visibilities = keypoints_dict["visibilities"]
-    else:
-        pred_visibilities = None
-
-    return {
-        "pred_val_imgs": pred_val_imgs,
-        "pred_train_depths": pred_train_depths,
-        "pred_keypoint_Ks": pred_keypoint_Ks,
-        "pred_keypoint_w2cs": pred_keypoint_w2cs,
-        "pred_keypoints_3d": pred_keypoints_3d,
-        "pred_visibilities": pred_visibilities,
-    }
-
-
-def evaluate_3d_tracking(data_dict, result_dict):
-    train_Ks = data_dict["train_Ks"]
-    train_w2cs = data_dict["train_w2cs"]
-    keypoints_3d = data_dict["keypoints_3d"]
-    time_ids = data_dict["time_ids"]
-    time_pairs = data_dict["time_pairs"]
-    index_pairs = data_dict["index_pairs"]
-    pred_keypoint_Ks = result_dict["pred_keypoint_Ks"]
-    pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"]
-    pred_keypoints_3d = result_dict["pred_keypoints_3d"]
-    if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks):
-        print("Inconsistent camera intrinsics.")
-        print(train_Ks[time_ids][0], pred_keypoint_Ks[0])
-    keypoint_w2cs = train_w2cs[time_ids]
-    q, t, s = solve_procrustes(
-        torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to(
-            torch.float32
-        ),
-        torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32),
-    )[0]
-    R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1))
-    pred_keypoints_3d = np.einsum(
-        "ij,...j->...i",
-        rt_to_mat4(R, t, s).numpy().astype(np.float64),
-        np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1),
-    )
-    pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:]
-    # Compute 3D tracking metrics.
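The metrics below are the standard ones: EPE is the mean Euclidean error over covisible keypoints, and PCK@tau is the fraction of those errors under a threshold (10 cm and 5 cm here, in metric scale). A toy sanity check with made-up errors:

import numpy as np

target = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
pred = np.array([[0.03, 0.04, 0.0], [1.0, 0.0, 0.12]])
epes = np.linalg.norm(target - pred, axis=-1)  # [0.05, 0.12]
assert np.isclose(epes.mean(), 0.085)  # EPE
assert np.isclose((epes < 0.1).mean(), 0.5)  # PCK @ 10 cm
assert np.isclose((epes < 0.05).mean(), 0.0)  # PCK @ 5 cm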
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
None].repeat(num_pts, axis=-1) - evaluation_points = one_hot_eye == 0 - for i in range(num_frames): - evaluation_points[i, :, ~visibilities[i]] = False - occ_acc = np.sum( - np.equal(pred_visibilities, target_visibilities) & evaluation_points - ) / np.sum(evaluation_points) - all_frac_within = [] - all_jaccard = [] - - for thresh in [4, 8, 16, 32, 64]: - within_dist = np.sum( - np.square(pred_points - target_points), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, target_visibilities) - count_correct = np.sum(is_correct & evaluation_points) - count_visible_points = np.sum(target_visibilities & evaluation_points) - frac_correct = count_correct / count_visible_points - all_frac_within.append(frac_correct) - - true_positives = np.sum(is_correct & pred_visibilities & evaluation_points) - gt_positives = np.sum(target_visibilities & evaluation_points) - false_positives = (~target_visibilities) & pred_visibilities - false_positives = false_positives | ((~within_dist) & pred_visibilities) - false_positives = np.sum(false_positives & evaluation_points) - jaccard = true_positives / (gt_positives + false_positives) - all_jaccard.append(jaccard) - AJ = np.mean(all_jaccard) - APCK = np.mean(all_frac_within) - - print(f"2D tracking AJ: {AJ:.4f}") - print(f"2D tracking avg PCK: {APCK:.4f}") - print(f"2D tracking occlusion accuracy: {occ_acc:.4f}") - print("-----------------------------") - return AJ, APCK, occ_acc - - -def evaluate_nv(data_dict, result_dict): - device = "cuda" - psnr_metric = mPSNR().to(device) - ssim_metric = mSSIM().to(device) - lpips_metric = mLPIPS().to(device) - - val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device) - val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device) - pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device) - - for i in range(len(val_imgs)): - val_img = val_imgs[i] / 255.0 - pred_val_img = pred_val_imgs[i] / 255.0 - val_covisible = val_covisibles[i] / 255.0 - psnr_metric.update(val_img, pred_val_img, val_covisible) - ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - mpsnr = psnr_metric.compute().item() - mssim = ssim_metric.compute().item() - mlpips = lpips_metric.compute().item() - print(f"NV mPSNR: {mpsnr:.4f}") - print(f"NV mSSIM: {mssim:.4f}") - print(f"NV mLPIPS: {mlpips:.4f}") - return mpsnr, mssim, mlpips - - -if __name__ == "__main__": - seq_names = args.seq_names - - epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], [] - AJ_all, APCK_all, occ_acc_all = [], [], [] - mpsnr_all, mssim_all, mlpips_all = [], [], [] - - for seq_name in seq_names: - print("=========================================") - print(f"Evaluating {seq_name}") - print("=========================================") - data_dir = osp.join(args.data_dir, seq_name) - if not osp.exists(data_dir): - data_dir = args.data_dir - if not osp.exists(data_dir): - raise ValueError(f"Data directory {data_dir} not found.") - result_dir = osp.join(args.result_dir, seq_name, "results/") - if not osp.exists(result_dir): - result_dir = osp.join(args.result_dir, "results/") - if not osp.exists(result_dir): - raise ValueError(f"Result directory {result_dir} not found.") - - with open(osp.join(data_dir, "splits/train.json")) as f: - train_names = json.load(f)["frame_names"] - with open(osp.join(data_dir, "splits/val.json")) as f: - val_names = json.load(f)["frame_names"] - - data_dict = load_data_dict(data_dir, train_names, 
val_names) - result_dict = load_result_dict(result_dir, val_names) - if result_dict["pred_keypoints_3d"] is not None: - epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict) - AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict) - epe_all.append(epe) - pck_3d_10cm_all.append(pck_3d_10cm) - pck_3d_5cm_all.append(pck_3d_5cm) - AJ_all.append(AJ) - APCK_all.append(APCK) - occ_acc_all.append(occ_acc) - if len(data_dict["val_imgs"]) > 0: - if result_dict["pred_val_imgs"] is None: - print("No NV results found.") - continue - mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict) - mpsnr_all.append(mpsnr) - mssim_all.append(mssim) - mlpips_all.append(mlpips) - - print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}") - print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}") - print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}") - print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}") - print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}") - print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}") - print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}") - print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}") - print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}") diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/__init__.py b/som_out/bear/code/2024-10-26-005029/flow3d/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/configs.cpython-310.pyc deleted file mode 100644 index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/init_utils.cpython-310.pyc deleted file mode 100644 index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/loss_utils.cpython-310.pyc deleted file mode 100644 index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/metrics.cpython-310.pyc deleted file mode 100644 index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ diff --git 
a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/params.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/configs.py b/som_out/bear/code/2024-10-26-005029/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig - motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float = 
0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/data/__init__.py b/som_out/bear/code/2024-10-26-005029/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 index 375f888d320380a5c331b6d14fff062e4925c21b..0000000000000000000000000000000000000000 Binary files 
a/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/colmap.cpython-310.pyc deleted file mode 100644 index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc deleted file mode 100644 index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/data/base_dataset.py b/som_out/bear/code/2024-10-26-005029/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... - - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ... 
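# [Editor's sketch, not part of the original diff] The abstract methods above fix
# the tensor contract that every dataset must satisfy: get_tracks_3d returns
# (coords (N, T, 3), visibles (N, T), invisibles (N, T), confidences (N, T),
# colors (N, 3)) and get_bkgd_points returns (points, normals, colors), each (N, 3).
# A minimal in-memory subclass that satisfies the contract with random data might
# look like the following; all names below are illustrative, not from this repo.
import torch

class ToyDataset(BaseDataset):
    def __init__(self, n_frames: int = 8, h: int = 48, w: int = 64):
        super().__init__()
        self._n, self._h, self._w = n_frames, h, w

    @property
    def num_frames(self) -> int:
        return self._n

    def get_w2cs(self) -> torch.Tensor:  # (T, 4, 4) world-to-camera poses
        return torch.eye(4).expand(self._n, 4, 4).clone()

    def get_Ks(self) -> torch.Tensor:  # (T, 3, 3) intrinsics
        return torch.eye(3).expand(self._n, 3, 3).clone()

    def get_image(self, index: int) -> torch.Tensor:  # (H, W, 3) in [0, 1]
        return torch.rand(self._h, self._w, 3)

    def get_depth(self, index: int) -> torch.Tensor:  # (H, W)
        return torch.ones(self._h, self._w)

    def get_mask(self, index: int) -> torch.Tensor:  # (H, W)
        return torch.ones(self._h, self._w)

    def get_tracks_3d(self, num_samples: int, **kwargs):
        n, t = num_samples, self._n
        return (torch.rand(n, t, 3), torch.ones(n, t, dtype=torch.bool),
                torch.zeros(n, t, dtype=torch.bool), torch.ones(n, t),
                torch.rand(n, 3))

    def get_bkgd_points(self, num_samples: int, **kwargs):
        return (torch.rand(num_samples, 3), torch.rand(num_samples, 3),
                torch.rand(num_samples, 3))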
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/data/casual_dataset.py b/som_out/bear/code/2024-10-26-005029/flow3d/data/casual_dataset.py deleted file mode 100644 index db367682ee99a59fd914240269d8f8e13e00bd1f..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = depth_type - self.num_targets_per_frame = 
num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return self.depths[index] / self.scale - - def load_image(self, index) -> 
torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, dtype=torch.float32), - torch.arange(0, H, dtype=torch.float32), - indexing="xy", 
- ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/data/colmap.py b/som_out/bear/code/2024-10-26-005029/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = {v.name: k for k, v in 
images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
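    Example (editor's addition, taken from the camera reader below):
    read_next_bytes(fid, 24, "iiQQ") unpacks one camera header as
    (camera_id, model_id, width, height), i.e. two 4-byte ints followed
    by two 8-byte unsigned ints.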
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/data/iphone_dataset.py b/som_out/bear/code/2024-10-26-005029/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.scene_norm_dict = 
scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
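# Editor's note: the aligned monocular files appear to store inverse depth
# (disparity); load_depth below clamps values under 1e-3 and then takes the
# reciprocal, mirroring CasualDataset.load_depth earlier in this diff.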
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
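# [Editor's sketch, not part of the original diff] The block below inlines the
# same normalization that compute_scene_norm applies in casual_dataset.py:
# center on the mean foreground point, take half the largest robust (5%-95%)
# extent as the scale, and rotate the mean camera up-axis onto +z. A hedged
# standalone version, with illustrative names only:
import roma
import torch
import torch.nn.functional as F

def sketch_scene_norm(tracks_3d: torch.Tensor, w2cs: torch.Tensor):
    """tracks_3d: (N, 3) foreground points; w2cs: (T, 4, 4) camera poses."""
    pts = tracks_3d.reshape(-1, 3)
    center = pts.mean(dim=0)
    centered = pts - center
    # Half the largest robust extent along any axis.
    scale = (centered.quantile(0.95, dim=0) - centered.quantile(0.05, dim=0)).max() / 2.0
    # The mean camera up-axis is minus the second row of the w2c rotations.
    up = -F.normalize(w2cs[:, 1, :3].mean(dim=0), dim=-1)
    target = up.new_tensor([0.0, 0.0, 1.0])
    axis = F.normalize(torch.linalg.cross(up, target), dim=-1)
    R = roma.rotvec_to_rotmat(axis * torch.acos(up.dot(target)))
    # The original code then composes rt_to_mat4(R, -R @ center) into one 4x4.
    return scale.item(), R, center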
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
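# Editor's note on the sampling budget below: every subsampled query frame
# contributes floor(num_samples / num_sampled_frames) tracks and the final
# frame absorbs the remainder, so the total matches num_samples whenever each
# frame has enough candidate tracks (otherwise fewer are returned, as the
# docstring above warns).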
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
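# Editor's note: grid_sample expects sampling locations normalized to
# [-1, 1], which normalize_coords provides; the (0, 3, 1, 2) permute puts the
# query image in NCHW order before the bilinear lookup below.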
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
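Editor's note: `iPhoneDatasetKeypointView` above evaluates keypoint transfer over all ordered frame pairs, which `itertools.product` enumerates. A toy illustration with hypothetical values:

```python
from itertools import product

import torch

# Three annotated frames -> 9 ordered (src, dst) pairs, identity pairs
# included, exactly as self.time_pairs is built above.
time_ids = [0, 5, 10]
time_pairs = torch.tensor(list(product(time_ids, repeat=2)))
print(time_pairs.shape)         # torch.Size([9, 2])
print(time_pairs[:4].tolist())  # [[0, 0], [0, 5], [0, 10], [5, 0]]
```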
-            self.val_dataset = TensorDataset(torch.zeros(0))  # type: ignore
-        self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset)
-        self.video_dataset = self.train_dataset.get_video_dataset()
-        guru.success("Loading finished!")
-
-    def train_dataloader(self) -> DataLoader:
-        return DataLoader(
-            self.train_dataset,
-            batch_size=self.batch_size,
-            num_workers=self.num_workers,
-            collate_fn=iPhoneDataset.train_collate_fn,
-        )
-
-    def val_dataloader(self) -> list[DataLoader]:
-        return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)]
-"""
diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/data/utils.py b/som_out/bear/code/2024-10-26-005029/flow3d/data/utils.py
deleted file mode 100644
index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000
--- a/som_out/bear/code/2024-10-26-005029/flow3d/data/utils.py
+++ /dev/null
@@ -1,360 +0,0 @@
-from typing import List, Optional, Tuple, TypedDict
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch.nn.modules.utils import _pair, _quadruple
-
-UINT16_MAX = 65535
-
-
-class SceneNormDict(TypedDict):
-    scale: float
-    transfm: torch.Tensor
-
-
-def to_device(batch, device):
-    if isinstance(batch, dict):
-        return {k: to_device(v, device) for k, v in batch.items()}
-    if isinstance(batch, (list, tuple)):
-        return [to_device(v, device) for v in batch]
-    if isinstance(batch, torch.Tensor):
-        return batch.to(device)
-    return batch
-
-
-def normalize_coords(coords, h, w):
-    assert coords.shape[-1] == 2
-    return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0
-
-
-def postprocess_occlusions(occlusions, expected_dist):
-    """Postprocess occlusions to a boolean visible flag.
-
-    Args:
-        occlusions: [-inf, inf], np.float32
-        expected_dist: [-inf, inf], np.float32
-
-    Returns:
-        visibles: bool
-    """
-
-    def sigmoid(x):
-        if isinstance(x, np.ndarray):
-            return 1 / (1 + np.exp(-x))
-        else:
-            return torch.sigmoid(x)
-
-    visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5
-    return visibles
-
-
-def parse_tapir_track_info(occlusions, expected_dist):
-    """
-    return:
-        valid_visible: mask of visible & confident points
-        valid_invisible: mask of invisible & confident points
-        confidence: clamped confidence scores (all < 0.5 -> 0)
-    """
-    visibility = 1 - F.sigmoid(occlusions)
-    confidence = 1 - F.sigmoid(expected_dist)
-    valid_visible = visibility * confidence > 0.5
-    valid_invisible = (1 - visibility) * confidence > 0.5
-    # set all confidence < 0.5 to 0
-    confidence = confidence * (valid_visible | valid_invisible).float()
-    return valid_visible, valid_invisible, confidence
-
-
-def get_tracks_3d_for_query_frame(
-    query_index: int,
-    query_img: torch.Tensor,
-    tracks_2d: torch.Tensor,
-    depths: torch.Tensor,
-    masks: torch.Tensor,
-    inv_Ks: torch.Tensor,
-    c2ws: torch.Tensor,
-):
-    """
-    :param query_index (int)
-    :param query_img [H, W, 3]
-    :param tracks_2d [N, T, 4]
-    :param depths [T, H, W]
-    :param masks [T, H, W]
-    :param inv_Ks [T, 3, 3]
-    :param c2ws [T, 4, 4]
-    returns (
-        tracks_3d [N, T, 3]
-        track_colors [N, 3]
-        visibles [N, T]
-        invisibles [N, T]
-        confidences [N, T]
-    )
-    """
-    T, H, W = depths.shape
-    query_img = query_img[None].permute(0, 3, 1, 2)  # (1, 3, H, W)
-    tracks_2d = tracks_2d.swapaxes(0, 1)  # (T, N, 4)
-    tracks_2d, occs, dists = (
-        tracks_2d[..., :2],
-        tracks_2d[..., 2],
-        tracks_2d[..., 3],
-    )
-    # visibles = postprocess_occlusions(occs, dists)
-    # (T, N), (T, N), (T, N)
-    visibles,
invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - track_depths = F.grid_sample( - depths[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths[..., None] - ) - tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[ - ..., :3 - ] - # Filter out out-of-mask tracks. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - is_in_masks = ( - F.grid_sample( - masks[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - )[:, 0, 0] - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - - # valid if in the fg mask at least 40% of the time - # in_mask_counts = is_in_masks.sum(0) - # t = 0.25 - # thresh = min(t * T, in_mask_counts.float().quantile(t).item()) - # valid = in_mask_counts > thresh - valid = is_in_masks[query_index] - # valid if visible 5% of the time - visible_counts = visibles.sum(0) - valid = valid & ( - visible_counts - >= min( - int(0.05 * T), - visible_counts.float().quantile(0.1).item(), - ) - ) - - # Get track's color from the query frame. - # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. 
Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. - If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. 
-def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] - ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/init_utils.py b/som_out/bear/code/2024-10-26-005029/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from 
flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes -from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - -def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor -) -> GaussianParams: - """ - using dataclasses individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. - opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - -def init_bg( - points: StaticObservations, -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkdg_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkdg_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals. 
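Editor's note: the scale initialization above (used for both foreground and background Gaussians) derives each Gaussian's size from its mean distance to the three nearest neighbors. A self-contained sketch using scikit-learn, matching the `knn` helper in `loss_utils`; the log is taken because `GaussianParams` stores scales pre-activation (`scale_activation = exp`):

```python
import numpy as np
import torch
from sklearn.neighbors import NearestNeighbors

def init_log_scales(points: torch.Tensor, k: int = 3) -> torch.Tensor:
    """Per-point log-scales from the mean distance to the k nearest neighbors."""
    nn = NearestNeighbors(n_neighbors=k + 1).fit(points.numpy())
    dists, _ = nn.kneighbors(points.numpy())  # column 0 is the point itself
    s = torch.from_numpy(dists[:, 1:].mean(axis=-1, keepdims=True).astype(np.float32))
    s = s.clamp(torch.quantile(s, 0.05), torch.quantile(s, 0.95))  # trim outliers
    return torch.log(s.repeat(1, 3))  # isotropic init, stored pre-exp
```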
- local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t - 
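Editor's note: `solve_procrustes` (imported from `flow3d.transforms`, not shown in this diff) fits a rigid transform between the canonical and target point sets in the loop below. One standard way to implement such a weighted rigid fit is the Kabsch algorithm; a sketch under the assumption of a pure rotation plus translation (the real helper also enforces SE(3) and returns error diagnostics):

```python
import torch

def weighted_kabsch(src: torch.Tensor, dst: torch.Tensor, w: torch.Tensor):
    """Weighted rigid fit dst ~= R @ src + t for (N, 3) point sets, weights (N,)."""
    w = w / w.sum()
    mu_s = (w[:, None] * src).sum(0)
    mu_d = (w[:, None] * dst).sum(0)
    cov = (dst - mu_d).T @ (w[:, None] * (src - mu_s))  # 3x3 cross-covariance
    U, _, Vt = torch.linalg.svd(cov)
    d = torch.sign(torch.det(U @ Vt)).item()            # fix possible reflection
    R = U @ torch.diag(torch.tensor([1.0, 1.0, d])) @ Vt
    return R, mu_d - R @ mu_s
```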
procrustes_weights = (
-                weights[cano_t]
-                * weights[cur_t]
-                * (confidences[cano_t] + confidences[cur_t])
-                / 2
-            )
-            if procrustes_weights.sum() < min_mean_weight * num_frames:
-                init_rots[n, cur_t] = init_rots[n, prev_t]
-                init_ts[n, cur_t] = init_ts[n, prev_t]
-                cluster_skip_ts.append(cur_t)
-            else:
-                se3, (err, err_before) = solve_procrustes(
-                    cluster[cano_t],
-                    cluster[cur_t],
-                    weights=procrustes_weights,
-                    enforce_se3=True,
-                    rot_type=rot_type,
-                )
-                init_rot, init_t, _ = se3
-                assert init_rot.shape[-1] == rot_dim
-                # double cover
-                if rot_type == "quat" and torch.linalg.norm(
-                    init_rot - init_rots[n][prev_t]
-                ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]):
-                    init_rot = -init_rot
-                init_rots[n, cur_t] = init_rot
-                init_ts[n, cur_t] = init_t
-                if np.isnan(err):
-                    print(f"{cur_t=} {err=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                if np.isnan(err_before):
-                    print(f"{cur_t=} {err_before=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                errs_after[n, cur_t] = err
-                errs_before[n, cur_t] = err_before
-                prev_t = cur_t
-        skipped_ts[cluster_id.item()] = cluster_skip_ts
-
-    guru.info(f"{skipped_ts=}")
-    guru.info(
-        "procrustes init median error: {:.5f} => {:.5f}".format(
-            np.median(errs_before[errs_before > 0]),
-            np.median(errs_after[errs_after > 0]),
-        )
-    )
-    guru.info(
-        "procrustes init mean error: {:.5f} => {:.5f}".format(
-            np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0])
-        )
-    )
-    guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}")
-
-    if vis:
-        server = get_server(port)
-        center_idcs = torch.argmin(dists2centers, dim=0)
-        print(f"{dists2centers.shape=} {center_idcs.shape=}")
-        vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs])
-        vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks")
-        import ipdb
-
-        ipdb.set_trace()
-
-    bases = MotionBases(init_rots, init_ts)
-    return bases, motion_coefs, tracks_3d
-
-
-def run_initial_optim(
-    fg: GaussianParams,
-    bases: MotionBases,
-    tracks_3d: TrackObservations,
-    Ks: torch.Tensor,
-    w2cs: torch.Tensor,
-    num_iters: int = 1000,
-    use_depth_range_loss: bool = False,
-):
-    """
-    :param motion_rots: [num_bases, num_frames, 4|6]
-    :param motion_transls: [num_bases, num_frames, 3]
-    :param motion_coefs: [num_bases, num_frames]
-    :param means: [num_gaussians, 3]
-    """
-    optimizer = torch.optim.Adam(
-        [
-            {"params": bases.params["rots"], "lr": 1e-2},
-            {"params": bases.params["transls"], "lr": 3e-2},
-            {"params": fg.params["motion_coefs"], "lr": 1e-2},
-            {"params": fg.params["means"], "lr": 1e-3},
-        ],
-    )
-    scheduler = torch.optim.lr_scheduler.ExponentialLR(
-        optimizer, gamma=0.1 ** (1 / num_iters)
-    )
-    G = fg.params.means.shape[0]
-    num_frames = bases.num_frames
-    device = bases.params["rots"].device
-
-    w_smooth_func = lambda i, min_v, max_v, th: (
-        min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v
-    )
-
-    gt_2d, gt_depth = project_2d_tracks(
-        tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True
-    )
-    # (G, T, 2)
-    gt_2d = gt_2d.swapaxes(0, 1)
-    # (G, T)
-    gt_depth = gt_depth.swapaxes(0, 1)
-
-    ts = torch.arange(0, num_frames, device=device)
-    ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2)
-    ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1))  # (3B,)
-
-    pbar = tqdm(range(0, num_iters))
-    for i in pbar:
-        coefs = fg.get_coefs()
-        transfms = bases.compute_transforms(ts, coefs)
-        positions = torch.einsum(
-            "pnij,pj->pni",
-            transfms,
-            F.pad(fg.params["means"], (0,
1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. - w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = 
init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / 
(cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/loss_utils.py b/som_out/bear/code/2024-10-26-005029/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return 
torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - 
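# Editor's note: the two squared terms completing this function below project
# each frame-to-frame displacement onto the viewing ray and penalize it, i.e.
# they suppress motion along the camera's depth axis between adjacent frames
# (the commented-out variant just below penalized the second difference,
# i.e. acceleration, instead).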
# acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/metrics.py b/som_out/bear/code/2024-10-26-005029/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/metrics.py +++ /dev/null @@ -1,313 +0,0 @@ -from typing import Literal - -import numpy as np -import torch -import torch.nn.functional as F -from torchmetrics.functional.image.lpips import _NoTrainLpips -from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure -from torchmetrics.metric import Metric -from torchmetrics.utilities import dim_zero_cat -from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE - - -def compute_psnr( - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, -) -> float: - """ - Args: - preds (torch.Tensor): (..., 3) predicted images in [0, 1]. - targets (torch.Tensor): (..., 3) target images in [0, 1]. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - - Returns: - psnr (float): Peak signal-to-noise ratio. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - return ( - -10.0 - * torch.log( - F.mse_loss( - preds * masks[..., None], - targets * masks[..., None], - reduction="sum", - ) - / masks.sum().clamp(min=1.0) - / 3.0 - ) - / np.log(10.0) - ).item() - - -def compute_pose_errors( - preds: torch.Tensor, targets: torch.Tensor -) -> tuple[float, float, float]: - """ - Args: - preds: (N, 4, 4) predicted camera poses. - targets: (N, 4, 4) target camera poses. - - Returns: - ate (float): Absolute trajectory error. - rpe_t (float): Relative pose error in translation. - rpe_r (float): Relative pose error in rotation (degree). - """ - # Compute ATE. - ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item() - # Compute RPE_t and RPE_r. - # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r. - # torch has numerical issues for acos when the value is close to 1.0, i.e. - # RPE_r is supposed to be very small, and will result in artificially large - # error. 
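Editor's note: a quick numeric sanity check of `compute_psnr` above (hypothetical values; assumes the function is in scope). A constant 0.01 error on one channel inside the mask gives a per-element MSE of 1e-4 / 3, i.e. about 44.8 dB:

```python
import torch

pred = torch.full((1, 64, 64, 3), 0.5)
gt = pred.clone()
gt[..., 0] += 0.01                   # 0.01 error on the red channel only
mask = torch.zeros(1, 64, 64)
mask[:, 16:48, 16:48] = 1.0          # evaluate a 32x32 crop
print(compute_psnr(pred, gt, mask))  # -10 * log10(1e-4 / 3) ~= 44.77
```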
-    preds = preds.detach().cpu().numpy()
-    targets = targets.detach().cpu().numpy()
-    pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:]
-    target_rels = np.linalg.inv(targets[:-1]) @ targets[1:]
-    error_rels = np.linalg.inv(target_rels) @ pred_rels
-    traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1)
-    rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item()
-    rpe_r = (
-        np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item()
-        / np.pi
-        * 180.0
-    )
-    return ate, rpe_t, rpe_r
-
-
-class mPSNR(PeakSignalNoiseRatio):
-    sum_squared_error: list[torch.Tensor]
-    total: list[torch.Tensor]
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            data_range=1.0,
-            base=10.0,
-            dim=None,
-            reduction="elementwise_mean",
-            **kwargs,
-        )
-        self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat")
-        self.add_state("total", default=[], dist_reduce_fx="cat")
-
-    def __len__(self) -> int:
-        return len(self.total)
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (..., 3) float32 predicted images.
-            targets (torch.Tensor): (..., 3) float32 target images.
-            masks (torch.Tensor | None): (...,) optional binary masks where the
-                1-regions will be taken into account.
-        """
-        if masks is None:
-            masks = torch.ones_like(preds[..., 0])
-        self.sum_squared_error.append(
-            torch.sum(torch.pow((preds - targets) * masks[..., None], 2))
-        )
-        self.total.append(masks.sum().to(torch.int64) * 3)
-
-    def compute(self) -> torch.Tensor:
-        """Compute peak signal-to-noise ratio over state."""
-        sum_squared_error = dim_zero_cat(self.sum_squared_error)
-        total = dim_zero_cat(self.total)
-        return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0)
-
-
-class mSSIM(StructuralSimilarityIndexMeasure):
-    similarity: list
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            reduction=None,
-            data_range=1.0,
-            return_full_image=False,
-            **kwargs,
-        )
-        assert isinstance(self.sigma, float)
-
-    def __len__(self) -> int:
-        return sum([s.shape[0] for s in self.similarity])
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (B, H, W, 3) float32 predicted images.
-            targets (torch.Tensor): (B, H, W, 3) float32 target images.
-            masks (torch.Tensor | None): (B, H, W) optional binary masks where
-                the 1-regions will be taken into account.
-        """
-        if masks is None:
-            masks = torch.ones_like(preds[..., 0])
-
-        # Construct a 1D Gaussian blur filter.
-        assert isinstance(self.kernel_size, int)
-        hw = self.kernel_size // 2
-        shift = (2 * hw - self.kernel_size + 1) / 2
-        assert isinstance(self.sigma, float)
-        f_i = (
-            (torch.arange(self.kernel_size, device=preds.device) - hw + shift)
-            / self.sigma
-        ) ** 2
-        filt = torch.exp(-0.5 * f_i)
-        filt /= torch.sum(filt)
-
-        # Blur in x and y (faster than the 2D convolution).
-        def convolve2d(z, m, f):
-            # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf).
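# Editor's note: convolve2d below implements masked filtering -- it filters
# z * m with the kernel and counts valid pixels in m with a box kernel of the
# same size, then renormalizes by that count so masked-out pixels do not bias
# the local statistics; windows containing no valid pixels are zeroed, and
# the second return value marks which outputs are valid.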
- z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. - # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. 
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. - """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/params.py b/som_out/bear/code/2024-10-26-005029/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if 
f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] 
== 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/renderer.py b/som_out/bear/code/2024-10-26-005029/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. - work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/scene_model.py b/som_out/bear/code/2024-10-26-005029/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def 
__init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = 
GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). - target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = 
colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. - self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/tensor_dataclass.py b/som_out/bear/code/2024-10-26-005029/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
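
For reference, render() above packs every requested per-gaussian quantity (RGB, a foreground-mask channel, flattened 3D track targets, optionally depth) into one color tensor, rasterizes once, and then splits the rendered channels back out by name. A minimal torch-only sketch of that unpacking step, with made-up channel sizes standing in for the rasterizer output:

import torch

C, H, W, B = 1, 4, 4, 2  # cameras, height, width, target time steps (illustrative)
ds_expected = {"img": 3, "mask": 1, "tracks_3d": B * 3}

# Stand-in for the rasterizer output: (C, H, W, sum of channel dims).
render_colors = torch.rand(C, H, W, sum(ds_expected.values()))

outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1)
out_dict = {}
for (name, dim), x in zip(ds_expected.items(), outputs):
    assert x.shape[-1] == dim
    if name == "tracks_3d":
        # Un-flatten the per-target 3D positions back to (C, H, W, B, 3).
        x = x.reshape(C, H, W, B, 3)
    out_dict[name] = x
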
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/trainer.py b/som_out/bear/code/2024-10-26-005029/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
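
As an aside, compute_poses_all above routes through MotionBases.compute_transforms, which blends K basis trajectories with per-gaussian coefficients before converting the blended 6D rotations to matrices. A self-contained sketch of that blending on random stand-in data, mirroring cont_6d_to_rmat from transforms.py:

import torch
import torch.nn.functional as F

def cont_6d_to_rmat(cont_6d: torch.Tensor) -> torch.Tensor:
    # Gram-Schmidt the two 3-vectors of the 6D representation into a rotation.
    x = F.normalize(cont_6d[..., 0:3], dim=-1)
    y1 = cont_6d[..., 3:6]
    y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1)
    z = torch.linalg.cross(x, y, dim=-1)
    return torch.stack([x, y, z], dim=-1)

G, K, B = 5, 3, 4  # gaussians, motion bases, queried time steps (illustrative)
coefs = torch.softmax(torch.randn(G, K), dim=-1)  # per-gaussian basis weights
transls = torch.randn(K, B, 3)                    # per-basis translations at the B times
rots_6d = torch.randn(K, B, 6)                    # per-basis 6D rotations at the B times

blended_transls = torch.einsum("gk,kbi->gbi", coefs, transls)  # (G, B, 3)
blended_rots = torch.einsum("gk,kbi->gbi", coefs, rots_6d)     # (G, B, 6)
rotmats = cont_6d_to_rmat(blended_rots)                        # (G, B, 3, 3)
transforms = torch.cat([rotmats, blended_transls[..., None]], dim=-1)  # (G, B, 3, 4)
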
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
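
In isolation, the photometric term above mixes L1 with (1 - SSIM) at a fixed 0.8/0.2 weighting, using pytorch_msssim's SSIM module exactly as constructed in __init__:

import torch
import torch.nn.functional as F
from pytorch_msssim import SSIM

ssim = SSIM(data_range=1.0, size_average=True, channel=3)

def rgb_loss(pred: torch.Tensor, gt: torch.Tensor) -> torch.Tensor:
    # pred/gt: (B, H, W, 3) in [0, 1]; SSIM expects channels-first input.
    l1 = F.l1_loss(pred, gt)
    ssim_term = 1.0 - ssim(pred.permute(0, 3, 1, 2), gt.permute(0, 3, 1, 2))
    return 0.8 * l1 + 0.2 * ssim_term

pred, gt = torch.rand(2, 64, 64, 3), torch.rand(2, 64, 64, 3)
print(rgb_loss(pred, gt))
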
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
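
Concretely, both smoothness terms above penalize the discrete second difference 2*x[t] - x[t-1] - x[t+1], i.e. acceleration. A standalone sketch with a sanity check that a constant-velocity track incurs (numerically) zero penalty:

import torch

def accel_penalty(tracks: torch.Tensor) -> torch.Tensor:
    # tracks: (G, T, 3) positions over time; penalize the magnitude of the
    # discrete second difference 2*x[t] - x[t-1] - x[t+1].
    accel = 2 * tracks[:, 1:-1] - tracks[:, :-2] - tracks[:, 2:]
    return 0.5 * accel.norm(dim=-1).mean()

t = torch.linspace(0, 1, 10)
linear = torch.stack([t, 2 * t, torch.zeros_like(t)], dim=-1)[None]  # (1, 10, 3)
print(accel_penalty(linear))                  # ~0: constant-velocity track
print(accel_penalty(torch.randn(1, 10, 3)))   # > 0: jittery track
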
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
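
The split/duplicate decision made in _densify_control_step below reduces to two boolean tests; a toy sketch with hypothetical values (and ignoring the screen-radius test for brevity):

import torch

# Illustrative thresholds matching the config field names used below,
# not the tuned values of any particular run.
densify_xys_grad_threshold = 0.0002
densify_scale_threshold = 0.01

xys_grad_avg = torch.tensor([1e-5, 5e-4, 8e-4, 3e-4])  # mean screen-space grad norm
max_scale = torch.tensor([0.002, 0.05, 0.004, 0.02])   # largest axis of each gaussian

is_grad_too_high = xys_grad_avg > densify_xys_grad_threshold
is_scale_too_big = max_scale > densify_scale_threshold

# Large, poorly-fit gaussians get split in two; small ones get duplicated.
should_split = is_grad_too_high & is_scale_too_big
should_dup = is_grad_too_high & ~is_scale_too_big
print(should_split.tolist(), should_dup.tolist())
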
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/trajectories.py b/som_out/bear/code/2024-10-26-005029/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1) - y = torch.cross(z, x, dim=-1) # (3) - avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center) - avg_w2c = torch.linalg.inv(avg_c2w) - return avg_w2c - - -def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor: - """Triangulate a set of rays to find a single lookat point. - - Args: - origins (torch.Tensor): A (N, 3) array of ray origins. - viewdirs (torch.Tensor): A (N, 3) array of ray view directions. - - Returns: - torch.Tensor: A (3,) lookat point. - """ - - viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1) - eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None] - # Calculate projection matrix I - rr^T - I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :]) - # Compute sum of projections - sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3) - # Solve for the intersection point using least squares - lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0] - # Check NaNs. - assert not torch.any(torch.isnan(lookat)) - return lookat - - -def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor): - """ - Args: - positions: (N, 3) tensor of camera positions - lookat: (3,) tensor of lookat point - up: (3,) tensor of up vector - - Returns: - w2cs: (N, 3, 3) tensor of world to camera rotation matrices - """ - forward_vectors = F.normalize(lookat - positions, dim=-1) - right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1) - down_vectors = F.normalize( - torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1 - ) - Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1) - w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions)) - return w2cs - - -def get_arc_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_position = torch.linalg.inv(ref_w2c)[:3, 3] - thetas = ( - torch.sin( - torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[ - :-1 - ] - ) - * (degree / 2.0) - / 180.0 - * torch.pi - ) - positions = torch.einsum( - "nij,j->ni", - roma.rotvec_to_rotmat(thetas[:, None] * up[None]), - ref_position - lookat, - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_lemniscate_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi) - # Lemniscate curve in camera space. Starting at the origin. - thetas = ( - torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1] - + torch.pi / 2 - ) - positions = torch.stack( - [ - a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2), - a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2), - torch.zeros(num_frames, device=ref_w2c.device), - ], - dim=-1, - ) - # Transform to world space. 
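
For reference, get_lookat_w2cs above builds OpenCV-convention (+z forward, +y down) world-to-camera matrices from positions, a lookat point, and an up vector. A single-camera sketch of the same construction, with a quick check that the lookat point lands on the optical axis:

import torch
import torch.nn.functional as F

def lookat_w2c(position: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor) -> torch.Tensor:
    # Camera axes: +z forward toward the target, +y pointing "down" (OpenCV).
    forward = F.normalize(lookat - position, dim=-1)
    right = F.normalize(torch.linalg.cross(forward, up), dim=-1)
    down = F.normalize(torch.linalg.cross(forward, right), dim=-1)
    R = torch.stack([right, down, forward], dim=-1)  # world-from-camera rotation
    c2w = torch.eye(4)
    c2w[:3, :3] = R
    c2w[:3, 3] = position
    return torch.linalg.inv(c2w)

w2c = lookat_w2c(torch.tensor([0.0, 0.0, -3.0]), torch.zeros(3), torch.tensor([0.0, -1.0, 0.0]))
target_cam = w2c @ torch.tensor([0.0, 0.0, 0.0, 1.0])
print(target_cam[:3])  # the lookat point lands on the +z optical axis
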
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/transforms.py b/som_out/bear/code/2024-10-26-005029/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transfm to be SE3. - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - q (torch.Tensor): (4,), rotation component in quaternion of WXYZ - format. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (torch.Tensor): (), average L2 distance after alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error. 
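
The core of solve_procrustes above is the SVD-based orthogonal Procrustes (Kabsch) step. A minimal unweighted round-trip check of that step on synthetic data:

import torch

torch.manual_seed(0)
# Random ground-truth rotation via QR, with det fixed to +1.
Q, _ = torch.linalg.qr(torch.randn(3, 3))
R_gt = Q * torch.sign(torch.det(Q))

src = torch.randn(100, 3)
dst = src @ R_gt.T + torch.tensor([0.5, -1.0, 2.0])

# Kabsch / orthogonal Procrustes: SVD of the centered cross-covariance.
src_c = src - src.mean(0)
dst_c = dst - dst.mean(0)
U, _, Vh = torch.linalg.svd(dst_c.T @ src_c)
D = torch.eye(3)
D[2, 2] = torch.sign(torch.det(U) * torch.det(Vh))  # reflection guard
R = U @ D @ Vh
print(torch.allclose(R, R_gt, atol=1e-5))  # True
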
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/validator.py b/som_out/bear/code/2024-10-26-005029/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
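
The keypoint read-out above samples the dense rendered track map at continuous query pixels with F.grid_sample; normalize_coords is assumed here to map pixel (x, y) to grid_sample's [-1, 1] range under align_corners=True. A sketch that samples a coordinate map and recovers the queries by bilinear interpolation:

import torch
import torch.nn.functional as F

def normalize_coords(pix: torch.Tensor, h: int, w: int) -> torch.Tensor:
    # Pixel (x, y) -> [-1, 1] grid coordinates (align_corners=True convention).
    # Assumed behavior of flow3d's helper of the same name.
    return torch.stack([2 * pix[..., 0] / (w - 1) - 1, 2 * pix[..., 1] / (h - 1) - 1], dim=-1)

H, W = 6, 8
# A dense map whose channels are each pixel's own (x, y) coordinates.
ys, xs = torch.meshgrid(
    torch.arange(H, dtype=torch.float32), torch.arange(W, dtype=torch.float32), indexing="ij"
)
dense = torch.stack([xs, ys], dim=-1)  # (H, W, 2)

kps = torch.tensor([[1.5, 2.0], [6.0, 4.5]])  # continuous (x, y) queries
sampled = F.grid_sample(
    dense.permute(2, 0, 1)[None],               # (1, 2, H, W)
    normalize_coords(kps, H, W)[None, None],    # (1, 1, P, 2)
    align_corners=True,
).permute(0, 2, 3, 1)[0, 0]
print(sampled)  # recovers the query coordinates
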
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
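
The PCK metric above counts a keypoint as correct when the prediction lands within a threshold distance of the ground truth (here 5% of the larger image side). An illustration of the definition, not necessarily flow3d's exact implementation:

import torch

def pck(pred: torch.Tensor, gt: torch.Tensor, threshold: float) -> torch.Tensor:
    # Percentage of Correct Keypoints: fraction of predictions within
    # `threshold` (in pixels here) of the ground truth.
    return (torch.linalg.norm(pred - gt, dim=-1) < threshold).float().mean()

gt = torch.tensor([[10.0, 10.0], [50.0, 40.0], [70.0, 5.0]])
pred = gt + torch.tensor([[1.0, 0.0], [30.0, 0.0], [0.0, 2.0]])
print(pck(pred, gt, threshold=0.05 * 100))  # 2 of 3 within 5% of a 100px image
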
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
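The 2D track video renders 3D tracks into the target views by applying the target intrinsics and a clamped perspective divide, exactly as in the einsum above; in isolation:

import torch

def project(K: torch.Tensor, points_cam: torch.Tensor) -> torch.Tensor:
    # K: (3, 3) intrinsics; points_cam: (N, 3) camera-space points.
    uvw = torch.einsum("ij,nj->ni", K, points_cam)
    # Clamp depth so points near the camera plane do not divide by ~0.
    return uvw[..., :2] / uvw[..., 2:].clamp(min=1e-6)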
-            t = batch["ts"][0]
-            # (4, 4).
-            w2c = batch["w2cs"][0]
-            # (3, 3).
-            K = batch["Ks"][0]
-            # (H, W, 3).
-            img = batch["imgs"][0]
-            img_wh = img.shape[-2::-1]
-            rendered = self.model.render(
-                t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors
-            )
-            # Putting results onto CPU since it will consume unnecessarily
-            # large GPU memory for long sequences otherwise.
-            video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu())
-        video = torch.stack(video, dim=0)
-        iio.mimwrite(
-            osp.join(video_dir, "motion_coefs.mp4"),
-            make_video_divisble((video.numpy() * 255).astype(np.uint8)),
-            fps=fps,
-        )
diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/vis/__init__.py b/som_out/bear/code/2024-10-26-005029/flow3d/vis/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/vis/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc
deleted file mode 100644
index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/vis/__pycache__/render_panel.cpython-310.pyc
deleted file mode 100644
index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/vis/__pycache__/utils.cpython-310.pyc
deleted file mode 100644
index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/bear/code/2024-10-26-005029/flow3d/vis/__pycache__/viewer.cpython-310.pyc
deleted file mode 100644
index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-005029/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/vis/playback_panel.py b/som_out/bear/code/2024-10-26-005029/flow3d/vis/playback_panel.py
deleted file mode 100644
index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000
--- a/som_out/bear/code/2024-10-26-005029/flow3d/vis/playback_panel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import threading
-import time
-
-import viser
-
-
-def add_gui_playback_group(
-    server: viser.ViserServer,
-    num_frames: int,
-    min_fps: float = 1.0,
-    max_fps: float = 60.0,
-    fps_step: float = 0.1,
-    initial_fps: float = 10.0,
-):
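In brief, this helper builds a timestep slider, next/prev frame buttons, pause/resume buttons, and an FPS slider, and starts a daemon thread that advances the slider while playback is active; it returns the six handles in creation order. Typical usage, mirroring DynamicViewer further down (the rerender callback here is hypothetical):

gui_timestep, *_ = add_gui_playback_group(server, num_frames=80, initial_fps=15.0)
gui_timestep.on_update(lambda _: rerender())  # re-draw whenever the frame changes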
gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/vis/render_panel.py b/som_out/bear/code/2024-10-26-005029/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
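Keyframe orientations are stored as wxyz quaternions, matching viser's scalar-first convention; a pose round-trip through viser.transforms (imported as tf above) looks like:

import numpy as np
import viser.transforms as tf

wxyz = np.array([1.0, 0.0, 0.0, 0.0])  # identity rotation, scalar first
pose = tf.SE3.from_rotation_and_translation(tf.SO3(wxyz), np.zeros(3))
assert np.allclose(pose.rotation().wxyz, wxyz)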
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
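spline_t_from_t_sec leans on PCHIP because the interpolant is monotone: cumulative transition times map to fractional keyframe indices without ever running backwards. A tiny sketch with hypothetical timings:

import numpy as np
import scipy.interpolate

times = np.array([0.0, 2.0, 5.0])  # cumulative seconds at each keyframe
to_index = scipy.interpolate.PchipInterpolator(x=times, y=np.arange(len(times)))
float(to_index(3.5))  # a value in (1, 2): partway between keyframes 1 and 2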
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
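The pose curves are Kochanek–Bartels splines from the `splines` package, with the GUI tension slider feeding the first TCB component; a minimal positional example:

import numpy as np
import splines

curve = splines.KochanekBartels(
    [np.zeros(3), np.array([1.0, 1.0, 0.0]), np.array([2.0, 0.0, 0.0])],
    tcb=(0.5, 0.0, 0.0),  # (tension, continuity, bias)
    endconditions="natural",
)
point = curve.evaluate(0.5)  # spline parameter advances one unit per keyframe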
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
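The default FOV chosen here ultimately determines the exported intrinsics (see get_intrinsics in the save handler below): a vertical field of view maps to a focal length of 0.5 * H / tan(0.5 * fov). As a sketch:

import numpy as np

def fov_to_K(fov_rad: float, W: int, H: int) -> np.ndarray:
    # Vertical FOV -> pinhole intrinsics with a centered principal point.
    focal = 0.5 * H / np.tan(0.5 * fov_rad)
    return np.array([[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]])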
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
-        Returns current camera pose + FOV if available."""
-
-        if preview_frame_slider is None:
-            return
-        maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad(
-            preview_frame_slider.value / get_max_frame_index()
-        )
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-        render_tab_state.preview_fov = fov_rad
-        render_tab_state.preview_aspect = camera_path.get_aspect()
-        render_tab_state.preview_camera_type = camera_type.value
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-        return pose, fov_rad, time
-
-    def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]:
-        """Helper for creating the current frame # slider. This is removed and
-        re-added anytime the `max` value changes."""
-
-        with playback_folder:
-            preview_frame_slider = server.gui.add_slider(
-                "Preview frame",
-                min=0,
-                max=get_max_frame_index(),
-                step=1,
-                initial_value=0,
-                # Place right after the pause button.
-                order=preview_render_stop_button.order + 0.01,
-                disabled=get_max_frame_index() == 1,
-            )
-            play_button.disabled = preview_frame_slider.disabled
-            preview_render_button.disabled = preview_frame_slider.disabled
-
-        @preview_frame_slider.on_update
-        def _(_) -> None:
-            nonlocal preview_camera_handle
-            maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-            if maybe_pose_and_fov_rad_and_time is None:
-                return
-            pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-
-            preview_camera_handle = server.scene.add_camera_frustum(
-                "/preview_camera",
-                fov=fov_rad,
-                aspect=resolution.value[0] / resolution.value[1],
-                scale=0.35,
-                wxyz=pose.rotation().wxyz,
-                position=pose.translation(),
-                color=(10, 200, 30),
-            )
-            if render_tab_state.preview_render:
-                for client in server.get_clients().values():
-                    client.camera.wxyz = pose.rotation().wxyz
-                    client.camera.position = pose.translation()
-            if gui_timestep_handle is not None:
-                gui_timestep_handle.value = int(time)
-
-        return preview_frame_slider
-
-    # Back up the camera poses before previewing renders so they can be
-    # restored when the preview ends.
-    camera_pose_backup_from_id: Dict[int, tuple] = {}
-
-    @preview_render_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = True
-        preview_render_button.visible = False
-        preview_render_stop_button.visible = True
-
-        maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov, time = maybe_pose_and_fov_rad_and_time
-        del fov
-
-        # Hide all scene nodes when we're previewing the render.
-        server.scene.set_global_visibility(False)
-
-        # Back up and then set camera poses.
-        for client in server.get_clients().values():
-            camera_pose_backup_from_id[client.client_id] = (
-                client.camera.position,
-                client.camera.look_at,
-                client.camera.up_direction,
-            )
-            client.camera.wxyz = pose.rotation().wxyz
-            client.camera.position = pose.translation()
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-
-    @preview_render_stop_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = False
-        preview_render_button.visible = True
-        preview_render_stop_button.visible = False
-
-        # Revert camera poses.
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera() # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play).start() - - # Play the camera trajectory when the play button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # add button for loading existing path - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - camera_path_dir = datapath.parent - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # load the json file - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # apply the x rotation by 180 deg - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_Fov values will not be exactly matched. 
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/vis/utils.py b/som_out/bear/code/2024-10-26-005029/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, occ, radius=radius) 
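get_server reuses one ViserServer per port thanks to the Singleton metaclass defined above; the same pattern in isolation (relying on that metaclass):

class Cache(metaclass=Singleton):
    def __init__(self):
        self.items = {}

a, b = Cache(), Cache()
assert a is b  # the second call returns the instance created by the first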
- out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/bear/code/2024-10-26-005029/flow3d/vis/viewer.py b/som_out/bear/code/2024-10-26-005029/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/bear/code/2024-10-26-005029/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/bear/code/2024-10-26-005029/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/bear/code/2024-10-26-005029/scripts/evaluate_iphone.py b/som_out/bear/code/2024-10-26-005029/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005029/scripts/evaluate_iphone.py +++ /dev/null @@ 
-1,447 +0,0 @@ -import argparse -import json -import os.path as osp -from glob import glob -from itertools import product - -import cv2 -import imageio.v3 as iio -import numpy as np -import roma -import torch -from tqdm import tqdm - -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.metrics import mLPIPS, mPSNR, mSSIM -from flow3d.transforms import rt_to_mat4, solve_procrustes - -parser = argparse.ArgumentParser() -parser.add_argument( - "--data_dir", - type=str, - help="Path to the data directory that contains all the sequences.", -) -parser.add_argument( - "--result_dir", - type=str, - help="Path to the result directory that contains the results. " - "For batch evaluation, result_dir should contain subdirectories for each sequence (result_dir/seq_name/results). " - "For single-sequence evaluation, result_dir should contain results directly (result_dir/results).", -) -parser.add_argument( - "--seq_names", - type=str, - nargs="+", - default=[ - "apple", - "backpack", - "block", - "creeper", - "handwavy", - "haru-sit", - "mochi-high-five", - "paper-windmill", - "pillow", - "spin", - "sriracha-tree", - "teddy", - ], - help="Sequence names to evaluate.", -) -args = parser.parse_args() - - -def load_data_dict(data_dir, train_names, val_names): - val_imgs = np.array( - [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names] - ) - val_covisibles = np.array( - [ - iio.imread( - osp.join( - data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png" - ) - ) - for name in tqdm(val_names, desc="Loading val covisibles") - ] - ) - train_depths = np.array( - [ - np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0] - for name in train_names - ] - ) - train_Ks, train_w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [name + ".png" for name in train_names], - ) - train_Ks = train_Ks[:, :3, :3] - scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item() - train_c2ws = np.linalg.inv(train_w2cs) - train_c2ws[:, :3, -1] *= scale - train_w2cs = np.linalg.inv(train_c2ws) - keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json"))) - keypoints_2d = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints_2d.append(json.load(f)) - keypoints_2d = np.array(keypoints_2d) - keypoints_2d[..., :2] *= 2.0 - time_ids = np.array( - [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths] - ) - time_pairs = np.array(list(product(time_ids, repeat=2))) - index_pairs = np.array(list(product(range(len(time_ids)), repeat=2))) - keypoints_3d = [] - for i, kps_2d in zip(time_ids, keypoints_2d): - K = train_Ks[i] - w2c = train_w2cs[i] - depth = train_depths[i] - is_kp_visible = kps_2d[:, 2] == 1 - is_depth_valid = ( - cv2.remap( - (depth != 0).astype(np.float32), - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - == 1 - ) - kp_depths = cv2.remap( - depth, # type: ignore - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - kps_3d = ( - np.einsum( - "ij,pj->pi", - np.linalg.inv(K), - np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1), - ) - * kp_depths[:, None] - ) - kps_3d = np.einsum( - "ij,pj->pi", - np.linalg.inv(w2c)[:3], - np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1), - ) - kps_3d = np.concatenate( - [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1 - ) -
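# The two einsums above implement standard pinhole back-projection: - # K^{-1} @ [u, v, 1]^T scaled by the sampled depth lifts each keypoint into - # camera space, and inv(w2c)[:3] maps it into world space. The channel - # appended above flags keypoints that are both annotated visible and have - # valid (non-zero) depth; invalid points are zeroed out below. -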
kps_3d[kps_3d[:, -1] != 1] = 0.0 - keypoints_3d.append(kps_3d) - keypoints_3d = np.array(keypoints_3d) - return { - "val_imgs": val_imgs, - "val_covisibles": val_covisibles, - "train_depths": train_depths, - "train_Ks": train_Ks, - "train_w2cs": train_w2cs, - "keypoints_2d": keypoints_2d, - "keypoints_3d": keypoints_3d, - "time_ids": time_ids, - "time_pairs": time_pairs, - "index_pairs": index_pairs, - } - - -def load_result_dict(result_dir, val_names): - try: - pred_val_imgs = np.array( - [ - iio.imread(osp.join(result_dir, "rgb", f"{name}.png")) - for name in val_names - ] - ) - except Exception: - pred_val_imgs = None - try: - keypoints_dict = np.load( - osp.join(result_dir, "keypoints.npz"), allow_pickle=True - ) - if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict: - keypoints_dict = keypoints_dict["arr_0"].item() - pred_keypoint_Ks = keypoints_dict["Ks"] - pred_keypoint_w2cs = keypoints_dict["w2cs"] - pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"] - pred_train_depths = keypoints_dict["pred_train_depths"] - except Exception: - print( - "No keypoints.npz found; make sure this is because the method itself cannot produce keypoints." - ) - keypoints_dict = {} - pred_keypoint_Ks = None - pred_keypoint_w2cs = None - pred_keypoints_3d = None - pred_train_depths = None - - if "visibilities" in keypoints_dict: - pred_visibilities = keypoints_dict["visibilities"] - else: - pred_visibilities = None - - return { - "pred_val_imgs": pred_val_imgs, - "pred_train_depths": pred_train_depths, - "pred_keypoint_Ks": pred_keypoint_Ks, - "pred_keypoint_w2cs": pred_keypoint_w2cs, - "pred_keypoints_3d": pred_keypoints_3d, - "pred_visibilities": pred_visibilities, - } - - -def evaluate_3d_tracking(data_dict, result_dict): - train_Ks = data_dict["train_Ks"] - train_w2cs = data_dict["train_w2cs"] - keypoints_3d = data_dict["keypoints_3d"] - time_ids = data_dict["time_ids"] - time_pairs = data_dict["time_pairs"] - index_pairs = data_dict["index_pairs"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"] - if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks): - print("Inconsistent camera intrinsics.") - print(train_Ks[time_ids][0], pred_keypoint_Ks[0]) - keypoint_w2cs = train_w2cs[time_ids] - q, t, s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0] - R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1)) - pred_keypoints_3d = np.einsum( - "ij,...j->...i", - rt_to_mat4(R, t, s).numpy().astype(np.float64), - np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1), - ) - pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:] - # Compute 3D tracking metrics.
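- # For each ordered frame pair, EPE is the mean Euclidean distance between - # predicted and annotated 3D keypoints over the points covisible in both - # frames; PCK-3D at a threshold is the fraction of those points with error - # below it (10 cm and 5 cm below, in metric scale after the Procrustes - # alignment above). Per-pair values are then averaged.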
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
None].repeat(num_pts, axis=-1) - evaluation_points = one_hot_eye == 0 - for i in range(num_frames): - evaluation_points[i, :, ~visibilities[i]] = False - occ_acc = np.sum( - np.equal(pred_visibilities, target_visibilities) & evaluation_points - ) / np.sum(evaluation_points) - all_frac_within = [] - all_jaccard = [] - - for thresh in [4, 8, 16, 32, 64]: - within_dist = np.sum( - np.square(pred_points - target_points), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, target_visibilities) - count_correct = np.sum(is_correct & evaluation_points) - count_visible_points = np.sum(target_visibilities & evaluation_points) - frac_correct = count_correct / count_visible_points - all_frac_within.append(frac_correct) - - true_positives = np.sum(is_correct & pred_visibilities & evaluation_points) - gt_positives = np.sum(target_visibilities & evaluation_points) - false_positives = (~target_visibilities) & pred_visibilities - false_positives = false_positives | ((~within_dist) & pred_visibilities) - false_positives = np.sum(false_positives & evaluation_points) - jaccard = true_positives / (gt_positives + false_positives) - all_jaccard.append(jaccard) - AJ = np.mean(all_jaccard) - APCK = np.mean(all_frac_within) - - print(f"2D tracking AJ: {AJ:.4f}") - print(f"2D tracking avg PCK: {APCK:.4f}") - print(f"2D tracking occlusion accuracy: {occ_acc:.4f}") - print("-----------------------------") - return AJ, APCK, occ_acc - - -def evaluate_nv(data_dict, result_dict): - device = "cuda" - psnr_metric = mPSNR().to(device) - ssim_metric = mSSIM().to(device) - lpips_metric = mLPIPS().to(device) - - val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device) - val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device) - pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device) - - for i in range(len(val_imgs)): - val_img = val_imgs[i] / 255.0 - pred_val_img = pred_val_imgs[i] / 255.0 - val_covisible = val_covisibles[i] / 255.0 - psnr_metric.update(val_img, pred_val_img, val_covisible) - ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - mpsnr = psnr_metric.compute().item() - mssim = ssim_metric.compute().item() - mlpips = lpips_metric.compute().item() - print(f"NV mPSNR: {mpsnr:.4f}") - print(f"NV mSSIM: {mssim:.4f}") - print(f"NV mLPIPS: {mlpips:.4f}") - return mpsnr, mssim, mlpips - - -if __name__ == "__main__": - seq_names = args.seq_names - - epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], [] - AJ_all, APCK_all, occ_acc_all = [], [], [] - mpsnr_all, mssim_all, mlpips_all = [], [], [] - - for seq_name in seq_names: - print("=========================================") - print(f"Evaluating {seq_name}") - print("=========================================") - data_dir = osp.join(args.data_dir, seq_name) - if not osp.exists(data_dir): - data_dir = args.data_dir - if not osp.exists(data_dir): - raise ValueError(f"Data directory {data_dir} not found.") - result_dir = osp.join(args.result_dir, seq_name, "results/") - if not osp.exists(result_dir): - result_dir = osp.join(args.result_dir, "results/") - if not osp.exists(result_dir): - raise ValueError(f"Result directory {result_dir} not found.") - - with open(osp.join(data_dir, "splits/train.json")) as f: - train_names = json.load(f)["frame_names"] - with open(osp.join(data_dir, "splits/val.json")) as f: - val_names = json.load(f)["frame_names"] - - data_dict = load_data_dict(data_dir, train_names, 
val_names) - result_dict = load_result_dict(result_dir, val_names) - if result_dict["pred_keypoints_3d"] is not None: - epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict) - AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict) - epe_all.append(epe) - pck_3d_10cm_all.append(pck_3d_10cm) - pck_3d_5cm_all.append(pck_3d_5cm) - AJ_all.append(AJ) - APCK_all.append(APCK) - occ_acc_all.append(occ_acc) - if len(data_dict["val_imgs"]) > 0: - if result_dict["pred_val_imgs"] is None: - print("No NV results found.") - continue - mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict) - mpsnr_all.append(mpsnr) - mssim_all.append(mssim) - mlpips_all.append(mlpips) - - print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}") - print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}") - print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}") - print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}") - print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}") - print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}") - print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}") - print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}") - print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}") diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/__init__.py b/som_out/bear/code/2024-10-26-005304/flow3d/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/configs.cpython-310.pyc deleted file mode 100644 index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/init_utils.cpython-310.pyc deleted file mode 100644 index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/loss_utils.cpython-310.pyc deleted file mode 100644 index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/metrics.cpython-310.pyc deleted file mode 100644 index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ diff --git 
a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/params.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/configs.py b/som_out/bear/code/2024-10-26-005304/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig - motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float = 
0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/data/__init__.py b/som_out/bear/code/2024-10-26-005304/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 index 375f888d320380a5c331b6d14fff062e4925c21b..0000000000000000000000000000000000000000 Binary files 
a/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/colmap.cpython-310.pyc deleted file mode 100644 index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc deleted file mode 100644 index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/data/base_dataset.py b/som_out/bear/code/2024-10-26-005304/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... - - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ... 
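- # Concrete datasets (CasualDataset below and iPhoneDataset elsewhere in this - # snapshot) implement the accessors above; the trainer only interacts with - # this abstract interface. The collate function below stacks fixed-shape - # fields with default_collate but keeps the listed track fields as plain - # lists, since the number of 2D tracks can differ across sampled frames.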
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/data/casual_dataset.py b/som_out/bear/code/2024-10-26-005304/flow3d/data/casual_dataset.py deleted file mode 100644 index db367682ee99a59fd914240269d8f8e13e00bd1f..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = depth_type - self.num_targets_per_frame = 
num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return self.depths[index] / self.scale - - def load_image(self, index) -> 
torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, dtype=torch.float32), - torch.arange(0, H, dtype=torch.float32), - indexing="xy", 
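- # indexing="xy" gives grid[y, x] = (x, y), so the stack(..., dim=-1) just - # below yields an (H, W, 2) map of pixel coordinates that is later padded - # with ones and back-projected through K^{-1}.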
- ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/data/colmap.py b/som_out/bear/code/2024-10-26-005304/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = {v.name: k for k, v in 
images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
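- - Example: read_next_bytes(fid, 24, "ddq") reads two little-endian doubles - followed by one signed 8-byte integer (3 x 8 = 24 bytes).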
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/data/iphone_dataset.py b/som_out/bear/code/2024-10-26-005304/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.scene_norm_dict = 
scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
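# --- Added sketch (not part of the original file) -----------------------------
# The load_depth helper below clamps the stored map away from zero and then
# returns its reciprocal. Whether the .npy files hold depth or disparity is
# dataset-dependent, so this standalone sketch only illustrates the
# clamp-then-invert pattern itself:
import numpy as np

def clamp_and_invert(d: np.ndarray, eps: float = 1e-3) -> np.ndarray:
    d = np.maximum(d, eps)  # keep the reciprocal finite for zero/negative entries
    return 1.0 / d

# e.g. clamp_and_invert(np.array([0.0, 0.5, 2.0])) -> [1000., 2., 0.5]
# ------------------------------------------------------------------------------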
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
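# --- Added sketch (not part of the original file) -----------------------------
# The normalization below centers the subsampled tracks and takes half of the
# largest 5-95% per-axis extent as the scene scale, which is robust to outlier
# tracks. A minimal standalone version of the scale computation, assuming a
# (N, 3) point tensor:
import torch

def robust_scene_scale(pts: torch.Tensor) -> float:
    centered = pts - pts.mean(dim=0)
    lo = centered.quantile(0.05, dim=0)  # per-axis 5th percentile
    hi = centered.quantile(0.95, dim=0)  # per-axis 95th percentile
    return torch.max(hi - lo).item() / 2.0
# ------------------------------------------------------------------------------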
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
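# --- Added sketch (not part of the original file) -----------------------------
# The loop below spreads `num_samples` track queries across the candidate
# frames: every frame gets floor(num_samples / num_frames) queries and the
# last frame absorbs the remainder, so the total always sums back to
# num_samples. Toy version of that budgeting:
import numpy as np

def per_frame_budget(num_samples: int, num_frames: int) -> list[int]:
    base = int(np.floor(num_samples / num_frames))
    return [base] * (num_frames - 1) + [num_samples - base * (num_frames - 1)]

# per_frame_budget(10, 3) -> [3, 3, 4]
# ------------------------------------------------------------------------------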
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
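# --- Added sketch (not part of the original file) -----------------------------
# Depths and colors are read at continuous track locations with F.grid_sample,
# which expects coordinates normalized to [-1, 1] (that is what
# normalize_coords does). A standalone sketch of that sampling pattern, with
# assumed toy shapes:
import torch
import torch.nn.functional as F

def sample_at(values: torch.Tensor, uv: torch.Tensor) -> torch.Tensor:
    """values: (C, H, W) map; uv: (P, 2) pixel coords -> (P, C) samples."""
    _, H, W = values.shape
    grid = uv / uv.new_tensor([W - 1.0, H - 1.0]) * 2 - 1.0  # pixels -> [-1, 1]
    out = F.grid_sample(
        values[None], grid[None, None], align_corners=True, padding_mode="border"
    )  # (1, C, 1, P)
    return out[0, :, 0].T
# ------------------------------------------------------------------------------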
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
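# --- Added note (not part of the original file) --------------------------------
# Summary of the sample dict assembled below: each inline comment such as
# "# (H, W, 3)." documents the shape of the entry that follows it. A sample
# always carries the frame name, time id, camera (w2cs 4x4, Ks 3x3) and image
# tensors; in training mode it additionally carries depth and 2D-track
# supervision against `num_targets_per_frame` randomly drawn target frames,
# while in validation mode it carries the covisible mask instead.
# ------------------------------------------------------------------------------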
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
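# --- Added note with sketch (not part of the original file) --------------------
# The zero-length TensorDataset used below is a cheap stand-in when no
# validation split exists: an empty dataset yields no batches, so the
# validation loop becomes a no-op. Standalone check:
import torch
from torch.utils.data import DataLoader, TensorDataset

empty = TensorDataset(torch.zeros(0))
assert len(empty) == 0
assert sum(1 for _ in DataLoader(empty)) == 0  # no batches are produced
# ------------------------------------------------------------------------------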
self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/data/utils.py b/som_out/bear/code/2024-10-26-005304/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. - - Args: - occlusions: [-inf, inf], np.float32 - expected_dist: [-inf, inf], np.float32 - - Returns: - visibles: bool - """ - - def sigmoid(x): - # handle numpy and torch inputs alike - if isinstance(x, np.ndarray): - return 1 / (1 + np.exp(-x)) - else: - return torch.sigmoid(x) - - visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5 - return visibles - - -def parse_tapir_track_info(occlusions, expected_dist): - """ - return: - valid_visible: mask of visible & confident points - valid_invisible: mask of invisible & confident points - confidence: clamped confidence scores (all < 0.5 -> 0) - """ - visibility = 1 - F.sigmoid(occlusions) - confidence = 1 - F.sigmoid(expected_dist) - valid_visible = visibility * confidence > 0.5 - valid_invisible = (1 - visibility) * confidence > 0.5 - # set all confidence < 0.5 to 0 - confidence = confidence * (valid_visible | valid_invisible).float() - return valid_visible, valid_invisible, confidence - - -def get_tracks_3d_for_query_frame( - query_index: int, - query_img: torch.Tensor, - tracks_2d: torch.Tensor, - depths: torch.Tensor, - masks: torch.Tensor, - inv_Ks: torch.Tensor, - c2ws: torch.Tensor, -): - """ - :param query_index (int) - :param query_img [H, W, 3] - :param tracks_2d [N, T, 4] - :param depths [T, H, W] - :param masks [T, H, W] - :param inv_Ks [T, 3, 3] - :param c2ws [T, 4, 4] - returns ( - tracks_3d [N, T, 3] - track_colors [N, 3] - visibles [N, T] - invisibles [N, T] - confidences [N, T] - ) - """ - T, H, W = depths.shape - query_img = query_img[None].permute(0, 3, 1, 2) # (1, 3, H, W) - tracks_2d = tracks_2d.swapaxes(0, 1) # (T, N, 4) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - # (T, N), (T, N), (T, N) - visibles,
invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - track_depths = F.grid_sample( - depths[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths[..., None] - ) - tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[ - ..., :3 - ] - # Filter out out-of-mask tracks. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - is_in_masks = ( - F.grid_sample( - masks[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - )[:, 0, 0] - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - - # valid if in the fg mask at least 40% of the time - # in_mask_counts = is_in_masks.sum(0) - # t = 0.25 - # thresh = min(t * T, in_mask_counts.float().quantile(t).item()) - # valid = in_mask_counts > thresh - valid = is_in_masks[query_index] - # valid if visible 5% of the time - visible_counts = visibles.sum(0) - valid = valid & ( - visible_counts - >= min( - int(0.05 * T), - visible_counts.float().quantile(0.1).item(), - ) - ) - - # Get track's color from the query frame. - # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. 
Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. - If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. 
-def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] - ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/init_utils.py b/som_out/bear/code/2024-10-26-005304/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from 
flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes -from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - -def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the canonical frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. - opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - -def init_bg( - points: StaticObservations, -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkdg_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors for each point and use that as the - # scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkdg_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals.
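# --- Added sketch (not part of the original file) -----------------------------
# The code below builds, per point, the unit quaternion rotating the local +z
# axis onto the estimated surface normal: axis = z x n, angle = arccos(z . n).
# Single-normal version (assumes `roma`; degenerate when n is (anti)parallel
# to z, where the rotation axis is undefined):
import torch
import torch.nn.functional as F
import roma

def quat_from_normal(n: torch.Tensor) -> torch.Tensor:
    """n: unit normal of shape (3,) -> wxyz quaternion rotating +z onto n."""
    z = n.new_tensor([0.0, 0.0, 1.0])
    axis = F.normalize(z.cross(n, dim=-1), dim=-1)
    angle = z.dot(n).clamp(-1.0, 1.0).acos()
    return roma.rotvec_to_unitquat(axis * angle).roll(1, dims=-1)  # xyzw -> wxyz
# ------------------------------------------------------------------------------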
- local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t - 
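# --- Added sketch (not part of the original file) -----------------------------
# The "double cover" branch below exists because q and -q encode the same
# rotation; the estimate keeps whichever sign is closer to the previous frame
# so the per-frame quaternion trajectory stays continuous:
import torch

def fix_double_cover(q: torch.Tensor, q_prev: torch.Tensor) -> torch.Tensor:
    """Return q or -q, whichever is nearer to q_prev (both shape (4,))."""
    if torch.linalg.norm(q - q_prev) > torch.linalg.norm(-q - q_prev):
        return -q
    return q
# ------------------------------------------------------------------------------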
procrustes_weights = ( - weights[cano_t] - * weights[cur_t] - * (confidences[cano_t] + confidences[cur_t]) - / 2 - ) - if procrustes_weights.sum() < min_mean_weight * num_frames: - init_rots[n, cur_t] = init_rots[n, prev_t] - init_ts[n, cur_t] = init_ts[n, prev_t] - cluster_skip_ts.append(cur_t) - else: - se3, (err, err_before) = solve_procrustes( - cluster[cano_t], - cluster[cur_t], - weights=procrustes_weights, - enforce_se3=True, - rot_type=rot_type, - ) - init_rot, init_t, _ = se3 - assert init_rot.shape[-1] == rot_dim - # double cover: keep the sign closest to the previous frame - if rot_type == "quat" and torch.linalg.norm( - init_rot - init_rots[n][prev_t] - ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]): - init_rot = -init_rot - init_rots[n, cur_t] = init_rot - init_ts[n, cur_t] = init_t - if np.isnan(err): - print(f"{cur_t=} {err=}") - print(f"{procrustes_weights.isnan().sum()=}") - if np.isnan(err_before): - print(f"{cur_t=} {err_before=}") - print(f"{procrustes_weights.isnan().sum()=}") - errs_after[n, cur_t] = err - errs_before[n, cur_t] = err_before - prev_t = cur_t - skipped_ts[cluster_id.item()] = cluster_skip_ts - - guru.info(f"{skipped_ts=}") - guru.info( - "procrustes init median error: {:.5f} => {:.5f}".format( - np.median(errs_before[errs_before > 0]), - np.median(errs_after[errs_after > 0]), - ) - ) - guru.info( - "procrustes init mean error: {:.5f} => {:.5f}".format( - np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0]) - ) - ) - guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}") - - if vis: - server = get_server(port) - center_idcs = torch.argmin(dists2centers, dim=0) - print(f"{dists2centers.shape=} {center_idcs.shape=}") - vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs]) - vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks") - import ipdb - - ipdb.set_trace() - - bases = MotionBases(init_rots, init_ts) - return bases, motion_coefs, tracks_3d - - -def run_initial_optim( - fg: GaussianParams, - bases: MotionBases, - tracks_3d: TrackObservations, - Ks: torch.Tensor, - w2cs: torch.Tensor, - num_iters: int = 1000, - use_depth_range_loss: bool = False, -): - """ - :param motion_rots: [num_bases, num_frames, 4|6] - :param motion_transls: [num_bases, num_frames, 3] - :param motion_coefs: [num_bases, num_frames] - :param means: [num_gaussians, 3] - """ - optimizer = torch.optim.Adam( - [ - {"params": bases.params["rots"], "lr": 1e-2}, - {"params": bases.params["transls"], "lr": 3e-2}, - {"params": fg.params["motion_coefs"], "lr": 1e-2}, - {"params": fg.params["means"], "lr": 1e-3}, - ], - ) - scheduler = torch.optim.lr_scheduler.ExponentialLR( - optimizer, gamma=0.1 ** (1 / num_iters) - ) - G = fg.params.means.shape[0] - num_frames = bases.num_frames - device = bases.params["rots"].device - - w_smooth_func = lambda i, min_v, max_v, th: ( - min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v - ) - - gt_2d, gt_depth = project_2d_tracks( - tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - # (G, T, 2) - gt_2d = gt_2d.swapaxes(0, 1) - # (G, T) - gt_depth = gt_depth.swapaxes(0, 1) - - ts = torch.arange(0, num_frames, device=device) - ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1)) # (3 * num_frames,) - - pbar = tqdm(range(0, num_iters)) - for i in pbar: - coefs = fg.get_coefs() - transfms = bases.compute_transforms(ts, coefs) - positions = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 
1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. - w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = 
init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / 
(cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/loss_utils.py b/som_out/bear/code/2024-10-26-005304/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return 
torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - 
# acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/metrics.py b/som_out/bear/code/2024-10-26-005304/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/metrics.py +++ /dev/null @@ -1,313 +0,0 @@ -from typing import Literal - -import numpy as np -import torch -import torch.nn.functional as F -from torchmetrics.functional.image.lpips import _NoTrainLpips -from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure -from torchmetrics.metric import Metric -from torchmetrics.utilities import dim_zero_cat -from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE - - -def compute_psnr( - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, -) -> float: - """ - Args: - preds (torch.Tensor): (..., 3) predicted images in [0, 1]. - targets (torch.Tensor): (..., 3) target images in [0, 1]. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - - Returns: - psnr (float): Peak signal-to-noise ratio. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - return ( - -10.0 - * torch.log( - F.mse_loss( - preds * masks[..., None], - targets * masks[..., None], - reduction="sum", - ) - / masks.sum().clamp(min=1.0) - / 3.0 - ) - / np.log(10.0) - ).item() - - -def compute_pose_errors( - preds: torch.Tensor, targets: torch.Tensor -) -> tuple[float, float, float]: - """ - Args: - preds: (N, 4, 4) predicted camera poses. - targets: (N, 4, 4) target camera poses. - - Returns: - ate (float): Absolute trajectory error. - rpe_t (float): Relative pose error in translation. - rpe_r (float): Relative pose error in rotation (degree). - """ - # Compute ATE. - ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item() - # Compute RPE_t and RPE_r. - # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r. - # torch has numerical issues for acos when the value is close to 1.0, i.e. - # RPE_r is supposed to be very small, and will result in artificially large - # error. 
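-    # RPE compares consecutive-frame relative transforms, rel_i = T_i^{-1} @ T_{i+1},
-    # between predictions and targets; the rotation error is recovered from the
-    # trace of the residual rotation via arccos((tr(R) - 1) / 2).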
-    preds = preds.detach().cpu().numpy()
-    targets = targets.detach().cpu().numpy()
-    pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:]
-    target_rels = np.linalg.inv(targets[:-1]) @ targets[1:]
-    error_rels = np.linalg.inv(target_rels) @ pred_rels
-    traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1)
-    rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item()
-    rpe_r = (
-        np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item()
-        / np.pi
-        * 180.0
-    )
-    return ate, rpe_t, rpe_r
-
-
-class mPSNR(PeakSignalNoiseRatio):
-    sum_squared_error: list[torch.Tensor]
-    total: list[torch.Tensor]
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            data_range=1.0,
-            base=10.0,
-            dim=None,
-            reduction="elementwise_mean",
-            **kwargs,
-        )
-        self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat")
-        self.add_state("total", default=[], dist_reduce_fx="cat")
-
-    def __len__(self) -> int:
-        return len(self.total)
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (..., 3) float32 predicted images.
-            targets (torch.Tensor): (..., 3) float32 target images.
-            masks (torch.Tensor | None): (...,) optional binary masks where the
-                1-regions will be taken into account.
-        """
-        if masks is None:
-            masks = torch.ones_like(preds[..., 0])
-        self.sum_squared_error.append(
-            torch.sum(torch.pow((preds - targets) * masks[..., None], 2))
-        )
-        self.total.append(masks.sum().to(torch.int64) * 3)
-
-    def compute(self) -> torch.Tensor:
-        """Compute peak signal-to-noise ratio over state."""
-        sum_squared_error = dim_zero_cat(self.sum_squared_error)
-        total = dim_zero_cat(self.total)
-        return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0)
-
-
-class mSSIM(StructuralSimilarityIndexMeasure):
-    similarity: list
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            reduction=None,
-            data_range=1.0,
-            return_full_image=False,
-            **kwargs,
-        )
-        assert isinstance(self.sigma, float)
-
-    def __len__(self) -> int:
-        return sum([s.shape[0] for s in self.similarity])
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (B, H, W, 3) float32 predicted images.
-            targets (torch.Tensor): (B, H, W, 3) float32 target images.
-            masks (torch.Tensor | None): (B, H, W) optional binary masks where
-                the 1-regions will be taken into account.
-        """
-        if masks is None:
-            masks = torch.ones_like(preds[..., 0])
-
-        # Construct a 1D Gaussian blur filter.
-        assert isinstance(self.kernel_size, int)
-        hw = self.kernel_size // 2
-        shift = (2 * hw - self.kernel_size + 1) / 2
-        assert isinstance(self.sigma, float)
-        f_i = (
-            (torch.arange(self.kernel_size, device=preds.device) - hw + shift)
-            / self.sigma
-        ) ** 2
-        filt = torch.exp(-0.5 * f_i)
-        filt /= torch.sum(filt)
-
-        # Blur in x and y (faster than the 2D convolution).
-        def convolve2d(z, m, f):
-            # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf).
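-            # The blur is applied separably (two 1D passes) and is mask-aware:
-            # the signal and the mask are filtered together, then the response
-            # is renormalized by the blurred mask coverage so partially-masked
-            # windows keep the correct magnitude.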
- z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. - # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. 
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. - """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/params.py b/som_out/bear/code/2024-10-26-005304/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if 
f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] 
== 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/renderer.py b/som_out/bear/code/2024-10-26-005304/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. - work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/scene_model.py b/som_out/bear/code/2024-10-26-005304/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def 
__init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = 
GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). - target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = 
colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. - self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/tensor_dataclass.py b/som_out/bear/code/2024-10-26-005304/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
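-                # Plain dicts are rebuilt key-by-key so that nested containers
-                # of tensors are mapped recursively as well.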
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/trainer.py b/som_out/bear/code/2024-10-26-005304/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
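-        # work_dir receives the tensorboard logs and checkpoints; if a port is
-        # given, a DynamicViewer is attached for interactive inspection.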
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
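-        # Poses for every target timestep are computed in a single batched call,
-        # then split back into per-sample chunks of N below.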
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
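-        # Mark the query-track pixels so the rendered tracks_3d buffers can be
-        # gathered exactly at the supervised locations.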
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
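-        # At this point `loss` aggregates the rgb, mask, 2D-track, depth,
-        # basis/track smoothness, scale-variance, and z-acceleration terms,
-        # each weighted by its losses_cfg coefficient.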
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
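-        # Aggregates per-gaussian screen-space gradient norms, visibility
-        # counts, and maximum screen radii over all views rendered since the
-        # last control step; bails out if no render has populated the buffers.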
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/trajectories.py b/som_out/bear/code/2024-10-26-005304/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
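The three `*_in_optim` helpers that close the trainer above share one pattern: Adam keeps its `exp_avg`/`exp_avg_sq` moments keyed by the parameter *object*, so whenever Gaussians are split, duplicated, or culled, the moments must be resized in lockstep and re-keyed onto the new tensor. A toy sketch of the same surgery for a cull, assuming nothing beyond stock PyTorch:

```python
import torch

# One parameter tensor standing in for e.g. "fg.params.means".
p_old = torch.nn.Parameter(torch.randn(4, 3))
opt = torch.optim.Adam([{"params": p_old, "lr": 1e-2}])
p_old.grad = torch.randn_like(p_old)
opt.step()  # populates opt.state[p_old]["exp_avg"] / ["exp_avg_sq"]

keep = torch.tensor([True, False, True, True])  # cull Gaussian 1
p_new = torch.nn.Parameter(p_old.data[keep])

state = opt.state.pop(p_old)  # state is keyed by the Parameter object,
for key in state:             # so it must be re-keyed onto the new tensor
    if key != "step":
        state[key] = state[key][keep]  # slice moments to the surviving rows
opt.state[p_new] = state
opt.param_groups[0]["params"] = [p_new]
assert opt.state[p_new]["exp_avg"].shape == p_new.shape
```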
Compute the y axis (as z and x are normalized, y is already of norm 1)
-    y = torch.cross(z, x, dim=-1)  # (3)
-    avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center)
-    avg_w2c = torch.linalg.inv(avg_c2w)
-    return avg_w2c
-
-
-def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor:
-    """Triangulate a set of rays to find a single lookat point.
-
-    Args:
-        origins (torch.Tensor): A (N, 3) array of ray origins.
-        viewdirs (torch.Tensor): A (N, 3) array of ray view directions.
-
-    Returns:
-        torch.Tensor: A (3,) lookat point.
-    """
-
-    viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1)
-    eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None]
-    # Calculate projection matrix I - rr^T
-    I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :])
-    # Compute sum of projections
-    sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3)
-    # Solve for the intersection point using least squares
-    lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0]
-    # Check NaNs.
-    assert not torch.any(torch.isnan(lookat))
-    return lookat
-
-
-def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor):
-    """
-    Args:
-        positions: (N, 3) tensor of camera positions
-        lookat: (3,) tensor of lookat point
-        up: (3,) tensor of up vector
-
-    Returns:
-        w2cs: (N, 4, 4) tensor of world-to-camera transforms
-    """
-    forward_vectors = F.normalize(lookat - positions, dim=-1)
-    right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1)
-    down_vectors = F.normalize(
-        torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1
-    )
-    Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1)
-    w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions))
-    return w2cs
-
-
-def get_arc_w2cs(
-    ref_w2c: torch.Tensor,
-    lookat: torch.Tensor,
-    up: torch.Tensor,
-    num_frames: int,
-    degree: float,
-    **_,
-) -> torch.Tensor:
-    ref_position = torch.linalg.inv(ref_w2c)[:3, 3]
-    thetas = (
-        torch.sin(
-            torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[
-                :-1
-            ]
-        )
-        * (degree / 2.0)
-        / 180.0
-        * torch.pi
-    )
-    positions = torch.einsum(
-        "nij,j->ni",
-        roma.rotvec_to_rotmat(thetas[:, None] * up[None]),
-        ref_position - lookat,
-    )
-    return get_lookat_w2cs(positions, lookat, up)
-
-
-def get_lemniscate_w2cs(
-    ref_w2c: torch.Tensor,
-    lookat: torch.Tensor,
-    up: torch.Tensor,
-    num_frames: int,
-    degree: float,
-    **_,
-) -> torch.Tensor:
-    ref_c2w = torch.linalg.inv(ref_w2c)
-    a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi)
-    # Lemniscate curve in camera space. Starting at the origin.
-    thetas = (
-        torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1]
-        + torch.pi / 2
-    )
-    positions = torch.stack(
-        [
-            a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2),
-            a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2),
-            torch.zeros(num_frames, device=ref_w2c.device),
-        ],
-        dim=-1,
-    )
-    # Transform to world space. 
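A quick numerical check of `get_lookat` above: each ray contributes a projector I - dd^T, and the least-squares point satisfies (sum of projectors) x = (sum of projected origins). Rays constructed to pass exactly through a known point should triangulate back to it (synthetic data, torch only):

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
target = torch.tensor([0.5, -1.0, 2.0])
origins = torch.randn(8, 3)
viewdirs = F.normalize(target - origins, dim=-1)  # rays exactly through target

eye = torch.eye(3)[None]
I_min_cov = eye - viewdirs[..., None] * viewdirs[..., None, :]  # per-ray I - dd^T
sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3)
lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0]
assert torch.allclose(lookat, target, atol=1e-4)
```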
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/transforms.py b/som_out/bear/code/2024-10-26-005304/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
-
-    Returns:
-        torch.Tensor: (..., 4, 4)
-    """
-    mat34 = torch.cat([R, t[..., None]], dim=-1)
-    if s is None:
-        bottom = (
-            mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]])
-            .reshape((1,) * (mat34.dim() - 2) + (1, 4))
-            .expand(mat34.shape[:-2] + (1, 4))
-        )
-    else:
-        bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0)
-    mat4 = torch.cat([mat34, bottom], dim=-2)
-    return mat4
-
-
-def rmat_to_cont_6d(matrix):
-    """
-    :param matrix: (*, 3, 3)
-    :returns: 6d vector (*, 6)
-    """
-    return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1)
-
-
-def cont_6d_to_rmat(cont_6d):
-    """
-    :param cont_6d: 6d vector (*, 6)
-    :returns: matrix (*, 3, 3)
-    """
-    x1 = cont_6d[..., 0:3]
-    y1 = cont_6d[..., 3:6]
-
-    x = F.normalize(x1, dim=-1)
-    y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1)
-    z = torch.linalg.cross(x, y, dim=-1)
-
-    return torch.stack([x, y, z], dim=-1)
-
-
-def solve_procrustes(
-    src: torch.Tensor,
-    dst: torch.Tensor,
-    weights: torch.Tensor | None = None,
-    enforce_se3: bool = False,
-    rot_type: Literal["quat", "mat", "6d"] = "quat",
-):
-    """
-    Solve the Procrustes problem to align two point clouds by solving the
-    following problem:
-
-    min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1.
-
-    Args:
-        src (torch.Tensor): (N, 3).
-        dst (torch.Tensor): (N, 3).
-        weights (torch.Tensor | None): (N,), optional weights for alignment.
-        enforce_se3 (bool): Whether to enforce the transform to be SE(3),
-            i.e. to fix the scale to 1.
-        rot_type (str): Format of the returned rotation: "quat" (WXYZ
-            quaternion), "mat" (3, 3), or "6d" (continuous 6D).
-
-    Returns:
-        sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]):
-            rot (torch.Tensor): rotation component, in the rot_type format.
-            t (torch.Tensor): (3,), translation component.
-            s (torch.Tensor): (), scale component.
-        errors (tuple[float, float]): average L2 distance after and before
-            alignment.
-    """
-    # Compute weights.
-    if weights is None:
-        weights = src.new_ones(src.shape[0])
-    weights = weights[:, None] / weights.sum()
-    # Normalize point positions.
-    src_mean = (src * weights).sum(dim=0)
-    dst_mean = (dst * weights).sum(dim=0)
-    src_cent = src - src_mean
-    dst_cent = dst - dst_mean
-    # Normalize point scales.
-    if not enforce_se3:
-        src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt()
-        dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt()
-    else:
-        src_scale = dst_scale = src.new_tensor(1.0)
-    src_scaled = src_cent / src_scale
-    dst_scaled = dst_cent / dst_scale
-    # Compute the matrix for the singular value decomposition (SVD).
-    matrix = (weights * dst_scaled).T @ src_scaled
-    U, _, Vh = torch.linalg.svd(matrix)
-    # Special reflection case.
-    S = torch.eye(3, device=src.device)
-    if torch.det(U) * torch.det(Vh) < 0:
-        S[2, 2] = -1
-    R = U @ S @ Vh
-    # Compute the transformation.
-    if rot_type == "quat":
-        rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1)
-    elif rot_type == "6d":
-        rot = rmat_to_cont_6d(R)
-    else:
-        rot = R
-    s = dst_scale / src_scale
-    t = dst_mean / s - src_mean @ R.T
-    sim3 = rot, t, s
-    # Debug: error. 
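`solve_procrustes` is the weighted Umeyama/Kabsch alignment. A round-trip sanity check under the objective stated in its docstring, using the function above with made-up ground truth (`rot_type="mat"` sidesteps the WXYZ quaternion convention):

```python
import roma
import torch

torch.manual_seed(0)
R_gt = roma.random_rotmat()
s_gt = torch.tensor(1.7)
t_gt = torch.tensor([0.3, -0.2, 1.0])

src = torch.randn(200, 3)
dst = s_gt * (src @ R_gt.T + t_gt)  # matches the stated objective exactly

(R, t, s), (error, _) = solve_procrustes(src, dst, rot_type="mat")
assert torch.allclose(R, R_gt, atol=1e-4)
assert torch.allclose(t, t_gt, atol=1e-4)
assert torch.allclose(s, s_gt, atol=1e-4)
assert error < 1e-4  # average L2 distance after alignment
```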
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/validator.py b/som_out/bear/code/2024-10-26-005304/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
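The PCK update just below receives predicted and ground-truth keypoints plus a threshold of 5% of the larger image dimension. PCK ("percentage of correct keypoints") is simply the fraction of keypoints whose prediction lands within that radius; a minimal standalone version consistent with how `self.pck_metric` is called here (the project's actual `PCK` class lives in `flow3d.metrics`):

```python
import torch

class SimplePCK:
    """Fraction of keypoints whose prediction falls within a pixel threshold."""

    def __init__(self) -> None:
        self.correct = 0
        self.total = 0

    def update(self, pred: torch.Tensor, gt: torch.Tensor, threshold: float):
        # pred, gt: (P, 2) keypoint locations in pixels.
        dist = torch.linalg.norm(pred - gt, dim=-1)
        self.correct += int((dist < threshold).sum())
        self.total += dist.numel()

    def compute(self) -> float:
        return self.correct / max(self.total, 1)

# Called as below with threshold = max(W, H) * 0.05.
```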
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
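Both keypoint lookups above rely on the same trick: treat the dense (H, W, C) prediction as an image and sample it at sub-pixel keypoint locations with `F.grid_sample`, which expects coordinates normalized to [-1, 1]. A self-contained sketch of the pattern; the `normalize_coords` here is my assumption about the imported helper's convention (matching `align_corners=True`):

```python
import torch
import torch.nn.functional as F

def normalize_coords(xy: torch.Tensor, h: int, w: int) -> torch.Tensor:
    # Map pixel coords (x in [0, w-1], y in [0, h-1]) to [-1, 1].
    return 2.0 * xy / xy.new_tensor([w - 1, h - 1]) - 1.0

H, W, C = 48, 64, 3
dense = torch.randn(H, W, C)                       # e.g. per-pixel 3D tracks
kps = torch.tensor([[10.5, 20.0], [33.0, 7.25]])   # (P, 2) pixels, (x, y)

sampled = F.grid_sample(
    dense[None].permute(0, 3, 1, 2),           # (1, C, H, W)
    normalize_coords(kps, H, W)[None, None],   # (1, 1, P, 2)
    align_corners=True,
).permute(0, 2, 3, 1)[0, 0]                    # -> (P, C)
assert sampled.shape == (2, C)
```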
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
- t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (3, 3). - img = batch["imgs"][0] - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "motion_coefs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/vis/__init__.py b/som_out/bear/code/2024-10-26-005304/flow3d/vis/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/vis/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc deleted file mode 100644 index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/vis/__pycache__/render_panel.cpython-310.pyc deleted file mode 100644 index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/vis/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/bear/code/2024-10-26-005304/flow3d/vis/__pycache__/viewer.cpython-310.pyc deleted file mode 100644 index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005304/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/vis/playback_panel.py b/som_out/bear/code/2024-10-26-005304/flow3d/vis/playback_panel.py deleted file mode 100644 index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/vis/playback_panel.py +++ /dev/null @@ -1,68 +0,0 @@ -import threading -import time - -import viser - - -def add_gui_playback_group( - server: viser.ViserServer, - num_frames: int, - min_fps: float = 1.0, - max_fps: float = 60.0, - fps_step: float = 0.1, - initial_fps: float = 10.0, -): - 
gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/vis/render_panel.py b/som_out/bear/code/2024-10-26-005304/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
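For context, the `add_gui_playback_group` helper above wires up a timestep slider, prev/next buttons, pause/resume toggles, and an FPS slider, plus a daemon thread that advances the slider while playing. A minimal usage sketch (assumes viser is installed and this module path is importable; the callback body is hypothetical):

```python
import viser

from flow3d.vis.playback_panel import add_gui_playback_group

server = viser.ViserServer(port=8890)
gui_timestep, *_ = add_gui_playback_group(server, num_frames=80, initial_fps=15.0)

@gui_timestep.on_update
def _(_) -> None:
    # React to scrubbing or autoplay, e.g. re-render the scene at this frame.
    print(f"showing frame {gui_timestep.value}")
```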
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
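The "Go to" animation above eases the client camera along the SE(3) geodesic between the two poses: T(alpha) = T_current * exp(alpha * log(T_current^-1 * T_target)) for alpha from 0 to 1. Condensed to its core with viser's transform types (the poses here are made up; the atomic camera update and sleep follow below):

```python
import numpy as np
import viser.transforms as tf

T_world_current = tf.SE3.from_rotation_and_translation(
    tf.SO3.from_x_radians(0.3), np.array([0.0, 0.0, 1.0])
)
T_world_target = tf.SE3.from_rotation_and_translation(
    tf.SO3.from_z_radians(1.2), np.array([1.0, 2.0, 0.0])
)
T_current_target = T_world_current.inverse() @ T_world_target

for j in range(10):
    alpha = j / 9.0  # walks 0 -> 1 along the relative twist
    T_world_set = T_world_current @ tf.SE3.exp(T_current_target.log() * alpha)
    wxyz, position = T_world_set.rotation().wxyz, T_world_set.translation()
    # ...apply wxyz/position to the client camera, ~1/30 s per step.
```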
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
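`spline_t_from_t_sec` above maps wall-clock seconds to the spline's knot parameter through a monotone PCHIP interpolant, so the camera never backtracks along the path even when per-keyframe transition times are uneven. A condensed illustration of the non-looping branch (scipy only; the transition times are made up):

```python
import numpy as np
import scipy.interpolate

# Cumulative arrival times for 4 keyframes: transitions of 2 s, 1 s, 3 s.
transition_times_cumsum = np.array([0.0, 2.0, 3.0, 6.0])
spline_indices = np.arange(transition_times_cumsum.shape[0])

interp = scipy.interpolate.PchipInterpolator(
    x=transition_times_cumsum, y=spline_indices
)
# Monotone in time: 1.0 s lands inside segment 0, 4.5 s inside segment 2,
# and clipping pins the endpoint to the last keyframe index.
print(interp(1.0), interp(4.5), np.clip(interp(6.0), 0, spline_indices[-1]))
```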
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
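For reference, the handler below packages the current client camera into a `Keyframe` via `Keyframe.from_camera`. Constructed by hand, a keyframe carries exactly these fields (all values here are illustrative):

```python
import numpy as np

kf = Keyframe(
    time=12.0,  # scene timestep to associate with this camera
    position=np.array([0.0, 1.5, 3.0]),
    wxyz=np.array([1.0, 0.0, 0.0, 0.0]),  # identity orientation, WXYZ quaternion
    override_fov_enabled=False,
    override_fov_rad=75.0 / 180.0 * np.pi,
    aspect=1920 / 1080,
    override_transition_enabled=False,
    override_transition_sec=None,
)
```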
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
Returns current camera pose + FOV if available."""
-
-        if preview_frame_slider is None:
-            return
-        maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad(
-            preview_frame_slider.value / get_max_frame_index()
-        )
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-        render_tab_state.preview_fov = fov_rad
-        render_tab_state.preview_aspect = camera_path.get_aspect()
-        render_tab_state.preview_camera_type = camera_type.value
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-        return pose, fov_rad, time
-
-    def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]:
-        """Helper for creating the current frame # slider. This is removed and
-        re-added anytime the `max` value changes."""
-
-        with playback_folder:
-            preview_frame_slider = server.gui.add_slider(
-                "Preview frame",
-                min=0,
-                max=get_max_frame_index(),
-                step=1,
-                initial_value=0,
-                # Place right after the pause button.
-                order=preview_render_stop_button.order + 0.01,
-                disabled=get_max_frame_index() == 1,
-            )
-            play_button.disabled = preview_frame_slider.disabled
-            preview_render_button.disabled = preview_frame_slider.disabled
-
-        @preview_frame_slider.on_update
-        def _(_) -> None:
-            nonlocal preview_camera_handle
-            maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-            if maybe_pose_and_fov_rad_and_time is None:
-                return
-            pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-
-            preview_camera_handle = server.scene.add_camera_frustum(
-                "/preview_camera",
-                fov=fov_rad,
-                aspect=resolution.value[0] / resolution.value[1],
-                scale=0.35,
-                wxyz=pose.rotation().wxyz,
-                position=pose.translation(),
-                color=(10, 200, 30),
-            )
-            if render_tab_state.preview_render:
-                for client in server.get_clients().values():
-                    client.camera.wxyz = pose.rotation().wxyz
-                    client.camera.position = pose.translation()
-                if gui_timestep_handle is not None:
-                    gui_timestep_handle.value = int(time)
-
-        return preview_frame_slider
-
-    # We back up the camera poses before and after we start previewing renders.
-    camera_pose_backup_from_id: Dict[int, tuple] = {}
-
-    @preview_render_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = True
-        preview_render_button.visible = False
-        preview_render_stop_button.visible = True
-
-        maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov, time = maybe_pose_and_fov_rad_and_time
-        del fov
-
-        # Hide all scene nodes when we're previewing the render.
-        # (Must be False here; the matching True lives in the stop handler.)
-        server.scene.set_global_visibility(False)
-
-        # Back up and then set camera poses.
-        for client in server.get_clients().values():
-            camera_pose_backup_from_id[client.client_id] = (
-                client.camera.position,
-                client.camera.look_at,
-                client.camera.up_direction,
-            )
-            client.camera.wxyz = pose.rotation().wxyz
-            client.camera.position = pose.translation()
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-
-    @preview_render_stop_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = False
-        preview_render_button.visible = True
-        preview_render_stop_button.visible = False
-
-        # Revert camera poses. 
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera() # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play).start() - - # Play the camera trajectory when the play button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # add button for loading existing path - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - camera_path_dir = datapath.parent - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # load the json file - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # apply the x rotation by 180 deg - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_Fov values will not be exactly matched. 
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/vis/utils.py b/som_out/bear/code/2024-10-26-005304/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, occ, radius=radius) 
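The exported JSON can be consumed offline without viser. A minimal reader sketch, assuming only the field names populated by the save handler above (`camera_path`, `w2c`, `K`, `img_wh`, `fps`); `ROT_X_PI` is the same 180-degree rotation about the camera x axis that the keyframe loader applies when reading poses back (an OpenGL-style vs. OpenCV-style camera-frame flip).

```python
import json

import numpy as np

ROT_X_PI = np.diag([1.0, -1.0, -1.0])  # R_x(pi): flips the camera y and z axes

def flip_camera_convention(c2w: np.ndarray) -> np.ndarray:
    """Apply the same R_x(pi) flip the keyframe loader uses when reading poses."""
    out = c2w.copy()
    out[:3, :3] = c2w[:3, :3] @ ROT_X_PI
    return out

def load_camera_path(json_path: str):
    """Read back the per-frame (time, w2c, K, img_wh) tuples written above."""
    with open(json_path) as f:
        data = json.load(f)
    frames = []
    for frame in data["camera_path"]:
        w2c = np.asarray(frame["w2c"], dtype=np.float64).reshape(4, 4)
        K = np.asarray(frame["K"], dtype=np.float64).reshape(3, 3)
        frames.append((frame["time"], w2c, K, tuple(frame["img_wh"])))
    return data["fps"], frames
```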
- out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/bear/code/2024-10-26-005304/flow3d/vis/viewer.py b/som_out/bear/code/2024-10-26-005304/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/bear/code/2024-10-26-005304/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/bear/code/2024-10-26-005304/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/bear/code/2024-10-26-005304/scripts/evaluate_iphone.py b/som_out/bear/code/2024-10-26-005304/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005304/scripts/evaluate_iphone.py +++ /dev/null @@ 
-1,447 +0,0 @@ -import argparse -import json -import os.path as osp -from glob import glob -from itertools import product - -import cv2 -import imageio.v3 as iio -import numpy as np -import roma -import torch -from tqdm import tqdm - -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.metrics import mLPIPS, mPSNR, mSSIM -from flow3d.transforms import rt_to_mat4, solve_procrustes - -parser = argparse.ArgumentParser() -parser.add_argument( - "--data_dir", - type=str, - help="Path to the data directory that contains all the sequences.", -) -parser.add_argument( - "--result_dir", - type=str, - help="Path to the result directory that contains the results." - "for batch evaluation, result_dir should contain subdirectories for each sequence. (result_dir/seq_name/results)" - "for single sequence evaluation, result_dir should contain results directly (result_dir/results)", -) -parser.add_argument( - "--seq_names", - type=str, - nargs="+", - default=[ - "apple", - "backpack", - "block", - "creeper", - "handwavy", - "haru-sit", - "mochi-high-five", - "paper-windmill", - "pillow", - "spin", - "sriracha-tree", - "teddy", - ], - help="Sequence names to evaluate.", -) -args = parser.parse_args() - - -def load_data_dict(data_dir, train_names, val_names): - val_imgs = np.array( - [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names] - ) - val_covisibles = np.array( - [ - iio.imread( - osp.join( - data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png" - ) - ) - for name in tqdm(val_names, desc="Loading val covisibles") - ] - ) - train_depths = np.array( - [ - np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0] - for name in train_names - ] - ) - train_Ks, train_w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [name + ".png" for name in train_names], - ) - train_Ks = train_Ks[:, :3, :3] - scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item() - train_c2ws = np.linalg.inv(train_w2cs) - train_c2ws[:, :3, -1] *= scale - train_w2cs = np.linalg.inv(train_c2ws) - keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json"))) - keypoints_2d = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints_2d.append(json.load(f)) - keypoints_2d = np.array(keypoints_2d) - keypoints_2d[..., :2] *= 2.0 - time_ids = np.array( - [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths] - ) - time_pairs = np.array(list(product(time_ids, repeat=2))) - index_pairs = np.array(list(product(range(len(time_ids)), repeat=2))) - keypoints_3d = [] - for i, kps_2d in zip(time_ids, keypoints_2d): - K = train_Ks[i] - w2c = train_w2cs[i] - depth = train_depths[i] - is_kp_visible = kps_2d[:, 2] == 1 - is_depth_valid = ( - cv2.remap( - (depth != 0).astype(np.float32), - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - == 1 - ) - kp_depths = cv2.remap( - depth, # type: ignore - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - kps_3d = ( - np.einsum( - "ij,pj->pi", - np.linalg.inv(K), - np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1), - ) - * kp_depths[:, None] - ) - kps_3d = np.einsum( - "ij,pj->pi", - np.linalg.inv(w2c)[:3], - np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1), - ) - kps_3d = np.concatenate( - [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1 - ) - 
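The keypoint lifting above is plain pinhole backprojection: `x_cam = depth * K^-1 [u, v, 1]^T`, then `x_world = c2w[:3] @ [x_cam, 1]^T`. A self-contained sketch with a toy camera, mirroring the `np.einsum` chain in `load_data_dict`:

```python
import numpy as np

def backproject(kps_uv: np.ndarray, depths: np.ndarray,
                K: np.ndarray, w2c: np.ndarray) -> np.ndarray:
    """Lift (N, 2) pixel keypoints with per-point depths to world space."""
    ones = np.ones((len(kps_uv), 1))
    rays = (np.linalg.inv(K) @ np.concatenate([kps_uv, ones], axis=1).T).T
    pts_cam = rays * depths[:, None]
    c2w = np.linalg.inv(w2c)
    return (c2w[:3, :3] @ pts_cam.T).T + c2w[:3, 3]

K = np.array([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
print(backproject(np.array([[320.0, 240.0]]), np.array([2.0]), K, np.eye(4)))
# [[0. 0. 2.]]: the principal-point pixel at depth 2 lands on the optical axis
```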
kps_3d[kps_3d[:, -1] != 1] = 0.0 - keypoints_3d.append(kps_3d) - keypoints_3d = np.array(keypoints_3d) - return { - "val_imgs": val_imgs, - "val_covisibles": val_covisibles, - "train_depths": train_depths, - "train_Ks": train_Ks, - "train_w2cs": train_w2cs, - "keypoints_2d": keypoints_2d, - "keypoints_3d": keypoints_3d, - "time_ids": time_ids, - "time_pairs": time_pairs, - "index_pairs": index_pairs, - } - - -def load_result_dict(result_dir, val_names): - try: - pred_val_imgs = np.array( - [ - iio.imread(osp.join(result_dir, "rgb", f"{name}.png")) - for name in val_names - ] - ) - except: - pred_val_imgs = None - try: - keypoints_dict = np.load( - osp.join(result_dir, "keypoints.npz"), allow_pickle=True - ) - if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict: - keypoints_dict = keypoints_dict["arr_0"].item() - pred_keypoint_Ks = keypoints_dict["Ks"] - pred_keypoint_w2cs = keypoints_dict["w2cs"] - pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"] - pred_train_depths = keypoints_dict["pred_train_depths"] - except: - print( - "No keypoints.npz found, make sure that it's the method itself cannot produce keypoints." - ) - keypoints_dict = {} - pred_keypoint_Ks = None - pred_keypoint_w2cs = None - pred_keypoints_3d = None - pred_train_depths = None - - if "visibilities" in list(keypoints_dict.keys()): - pred_visibilities = keypoints_dict["visibilities"] - else: - pred_visibilities = None - - return { - "pred_val_imgs": pred_val_imgs, - "pred_train_depths": pred_train_depths, - "pred_keypoint_Ks": pred_keypoint_Ks, - "pred_keypoint_w2cs": pred_keypoint_w2cs, - "pred_keypoints_3d": pred_keypoints_3d, - "pred_visibilities": pred_visibilities, - } - - -def evaluate_3d_tracking(data_dict, result_dict): - train_Ks = data_dict["train_Ks"] - train_w2cs = data_dict["train_w2cs"] - keypoints_3d = data_dict["keypoints_3d"] - time_ids = data_dict["time_ids"] - time_pairs = data_dict["time_pairs"] - index_pairs = data_dict["index_pairs"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"] - if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks): - print("Inconsistent camera intrinsics.") - print(train_Ks[time_ids][0], pred_keypoint_Ks[0]) - keypoint_w2cs = train_w2cs[time_ids] - q, t, s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0] - R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1)) - pred_keypoints_3d = np.einsum( - "ij,...j->...i", - rt_to_mat4(R, t, s).numpy().astype(np.float64), - np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1), - ) - pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:] - # Compute 3D tracking metrics. 
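`solve_procrustes` here aligns predicted camera centers to the ground-truth ones with a similarity transform before the sim(3) is applied to the predicted keypoints. For reference, the standard closed-form Umeyama solution computes the same kind of (s, R, t); this is a hedged sketch of that classical method, not the repo's implementation (which returns a quaternion rather than a rotation matrix):

```python
import numpy as np

def umeyama_alignment(src: np.ndarray, dst: np.ndarray):
    """Similarity transform (s, R, t) minimizing ||dst - (s R src + t)||, src/dst: (N, 3)."""
    mu_src, mu_dst = src.mean(axis=0), dst.mean(axis=0)
    src_c, dst_c = src - mu_src, dst - mu_dst
    cov = dst_c.T @ src_c / len(src)
    U, D, Vt = np.linalg.svd(cov)
    S = np.eye(3)
    if np.linalg.det(U) * np.linalg.det(Vt) < 0:
        S[2, 2] = -1.0  # avoid reflections
    R = U @ S @ Vt
    var_src = (src_c ** 2).sum() / len(src)
    s = np.trace(np.diag(D) @ S) / var_src
    t = mu_dst - s * R @ mu_src
    return s, R, t

src = np.array([[0.0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]])
s, R, t = umeyama_alignment(src, 2.0 * src + np.array([1.0, 0, 0]))
print(round(s, 3), np.round(R, 3), np.round(t, 3))  # 2.0, identity, [1. 0. 0.]
```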
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
None].repeat(num_pts, axis=-1) - evaluation_points = one_hot_eye == 0 - for i in range(num_frames): - evaluation_points[i, :, ~visibilities[i]] = False - occ_acc = np.sum( - np.equal(pred_visibilities, target_visibilities) & evaluation_points - ) / np.sum(evaluation_points) - all_frac_within = [] - all_jaccard = [] - - for thresh in [4, 8, 16, 32, 64]: - within_dist = np.sum( - np.square(pred_points - target_points), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, target_visibilities) - count_correct = np.sum(is_correct & evaluation_points) - count_visible_points = np.sum(target_visibilities & evaluation_points) - frac_correct = count_correct / count_visible_points - all_frac_within.append(frac_correct) - - true_positives = np.sum(is_correct & pred_visibilities & evaluation_points) - gt_positives = np.sum(target_visibilities & evaluation_points) - false_positives = (~target_visibilities) & pred_visibilities - false_positives = false_positives | ((~within_dist) & pred_visibilities) - false_positives = np.sum(false_positives & evaluation_points) - jaccard = true_positives / (gt_positives + false_positives) - all_jaccard.append(jaccard) - AJ = np.mean(all_jaccard) - APCK = np.mean(all_frac_within) - - print(f"2D tracking AJ: {AJ:.4f}") - print(f"2D tracking avg PCK: {APCK:.4f}") - print(f"2D tracking occlusion accuracy: {occ_acc:.4f}") - print("-----------------------------") - return AJ, APCK, occ_acc - - -def evaluate_nv(data_dict, result_dict): - device = "cuda" - psnr_metric = mPSNR().to(device) - ssim_metric = mSSIM().to(device) - lpips_metric = mLPIPS().to(device) - - val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device) - val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device) - pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device) - - for i in range(len(val_imgs)): - val_img = val_imgs[i] / 255.0 - pred_val_img = pred_val_imgs[i] / 255.0 - val_covisible = val_covisibles[i] / 255.0 - psnr_metric.update(val_img, pred_val_img, val_covisible) - ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - mpsnr = psnr_metric.compute().item() - mssim = ssim_metric.compute().item() - mlpips = lpips_metric.compute().item() - print(f"NV mPSNR: {mpsnr:.4f}") - print(f"NV mSSIM: {mssim:.4f}") - print(f"NV mLPIPS: {mlpips:.4f}") - return mpsnr, mssim, mlpips - - -if __name__ == "__main__": - seq_names = args.seq_names - - epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], [] - AJ_all, APCK_all, occ_acc_all = [], [], [] - mpsnr_all, mssim_all, mlpips_all = [], [], [] - - for seq_name in seq_names: - print("=========================================") - print(f"Evaluating {seq_name}") - print("=========================================") - data_dir = osp.join(args.data_dir, seq_name) - if not osp.exists(data_dir): - data_dir = args.data_dir - if not osp.exists(data_dir): - raise ValueError(f"Data directory {data_dir} not found.") - result_dir = osp.join(args.result_dir, seq_name, "results/") - if not osp.exists(result_dir): - result_dir = osp.join(args.result_dir, "results/") - if not osp.exists(result_dir): - raise ValueError(f"Result directory {result_dir} not found.") - - with open(osp.join(data_dir, "splits/train.json")) as f: - train_names = json.load(f)["frame_names"] - with open(osp.join(data_dir, "splits/val.json")) as f: - val_names = json.load(f)["frame_names"] - - data_dict = load_data_dict(data_dir, train_names, 
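A toy version of the per-threshold PCK accumulation in `evaluate_2d_tracking` above: a prediction counts as correct at threshold tau if its squared pixel error is under tau squared and the target point is visible, and PCK averages over visible points.

```python
import numpy as np

pred = np.array([[10.0, 10.0], [50.0, 50.0], [100.0, 100.0]])
target = np.array([[12.0, 10.0], [80.0, 50.0], [100.0, 140.0]])
visible = np.array([True, True, False])

for thresh in [4, 8, 16, 32, 64]:
    within_dist = np.sum(np.square(pred - target), axis=-1) < np.square(thresh)
    frac_correct = np.sum(within_dist & visible) / np.sum(visible)
    print(thresh, frac_correct)  # 0.5 for tau in {4, 8, 16}, then 1.0
```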
val_names) - result_dict = load_result_dict(result_dir, val_names) - if result_dict["pred_keypoints_3d"] is not None: - epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict) - AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict) - epe_all.append(epe) - pck_3d_10cm_all.append(pck_3d_10cm) - pck_3d_5cm_all.append(pck_3d_5cm) - AJ_all.append(AJ) - APCK_all.append(APCK) - occ_acc_all.append(occ_acc) - if len(data_dict["val_imgs"]) > 0: - if result_dict["pred_val_imgs"] is None: - print("No NV results found.") - continue - mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict) - mpsnr_all.append(mpsnr) - mssim_all.append(mssim) - mlpips_all.append(mlpips) - - print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}") - print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}") - print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}") - print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}") - print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}") - print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}") - print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}") - print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}") - print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}") diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/__init__.py b/som_out/bear/code/2024-10-26-005638/flow3d/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/configs.cpython-310.pyc deleted file mode 100644 index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/init_utils.cpython-310.pyc deleted file mode 100644 index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/loss_utils.cpython-310.pyc deleted file mode 100644 index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/metrics.cpython-310.pyc deleted file mode 100644 index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ diff --git 
a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/params.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/configs.py b/som_out/bear/code/2024-10-26-005638/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig - motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float = 
0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/data/__init__.py b/som_out/bear/code/2024-10-26-005638/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 index 1b2448fcea15c8b3dde686b4b911945d6534d233..0000000000000000000000000000000000000000 Binary files 
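A sketch of wiring the learning-rate dataclasses above into a `SceneLRConfig`, for example to lower the foreground means LR for an ablation. This assumes the dataclasses are importable from `flow3d.configs`, where this file defines them.

```python
from flow3d.configs import BGLRConfig, FGLRConfig, MotionLRConfig, SceneLRConfig

lr_cfg = SceneLRConfig(
    fg=FGLRConfig(means=8e-5),  # half the default 1.6e-4
    bg=BGLRConfig(),
    motion_bases=MotionLRConfig(),
)
print(lr_cfg.fg.means, lr_cfg.bg.opacities)  # 8e-05 0.05
```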
a/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/colmap.cpython-310.pyc deleted file mode 100644 index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc deleted file mode 100644 index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/data/base_dataset.py b/som_out/bear/code/2024-10-26-005638/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... - - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ... 
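`train_collate_fn`, defined just below, keeps the ragged per-sample track keys as plain Python lists (each sample can carry a different number of tracks) and default-collates everything else. A toy batch illustrating that split:

```python
import torch
from torch.utils.data import default_collate

batch = [
    {"ts": torch.tensor(0), "query_tracks_2d": torch.zeros(3, 2)},
    {"ts": torch.tensor(1), "query_tracks_2d": torch.zeros(5, 2)},
]
collated = {
    "ts": default_collate([s["ts"] for s in batch]),           # stacked -> (2,)
    "query_tracks_2d": [s["query_tracks_2d"] for s in batch],  # list of (N_i, 2)
}
print(collated["ts"].shape, [t.shape for t in collated["query_tracks_2d"]])
```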
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/data/casual_dataset.py b/som_out/bear/code/2024-10-26-005638/flow3d/data/casual_dataset.py deleted file mode 100644 index b278e42803429ce7265169b2f24f942723fea3eb..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depthcrafter" #"aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything", #"aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = 
depth_type - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return self.depths[index] / 
self.scale - - def load_image(self, index) -> torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, dtype=torch.float32), - 
torch.arange(0, H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/data/colmap.py b/som_out/bear/code/2024-10-26-005638/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = {v.name: k for k, v in 
images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
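- Example (matching the camera header read below): read_next_bytes(fid,
- num_bytes=24, format_char_sequence="iiQQ") unpacks two 4-byte ints
- followed by two 8-byte unsigned ints (4 + 4 + 8 + 8 = 24).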
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/data/iphone_dataset.py b/som_out/bear/code/2024-10-26-005638/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.scene_norm_dict = 
scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
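- # The learned-depth .npy files store disparity; load_depth clamps values
- # at 1e-3 before inverting, so depth = 1 / disparity stays finite (capped
- # at 1e3) even where the network predicts zero disparity.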
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
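- # Scale is half the largest side of the 5%-95% quantile box of the
- # centered foreground tracks; the rotation maps the mean camera up axis
- # (the negated second row of the w2c rotations) onto world +z via the
- # axis-angle vector normalize(up x z) * acos(up . z), matching
- # compute_scene_norm in casual_dataset.py.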
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
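- # Budgeting: each of the num_sampled_frames candidate query frames
- # contributes floor(num_samples / num_sampled_frames) tracks, and the
- # last frame absorbs the remainder so the totals sum to num_samples.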
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
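- # grid_sample expects coordinates in [-1, 1]; normalize_coords maps pixel
- # (x, y) into that range, and padding_mode="border" clamps queries that
- # land marginally outside the image.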
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
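- # An empty TensorDataset keeps the val_dataloader plumbing intact when
- # the sequence ships no validation frames.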
- self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/data/utils.py b/som_out/bear/code/2024-10-26-005638/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. - - Args: - occlusions: [-inf, inf], np.float32 - expected_dist: [-inf, inf], np.float32 - - Returns: - visibles: bool - """ - - def sigmoid(x): - if isinstance(x, np.ndarray): - return 1 / (1 + np.exp(-x)) - else: - return torch.sigmoid(x) - - visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5 - return visibles - - -def parse_tapir_track_info(occlusions, expected_dist): - """ - return: - valid_visible: mask of visible & confident points - valid_invisible: mask of invisible & confident points - confidence: clamped confidence scores (all < 0.5 -> 0) - """ - visibility = 1 - F.sigmoid(occlusions) - confidence = 1 - F.sigmoid(expected_dist) - valid_visible = visibility * confidence > 0.5 - valid_invisible = (1 - visibility) * confidence > 0.5 - # set all confidence < 0.5 to 0 - confidence = confidence * (valid_visible | valid_invisible).float() - return valid_visible, valid_invisible, confidence - - -def get_tracks_3d_for_query_frame( - query_index: int, - query_img: torch.Tensor, - tracks_2d: torch.Tensor, - depths: torch.Tensor, - masks: torch.Tensor, - inv_Ks: torch.Tensor, - c2ws: torch.Tensor, -): - """ - :param query_index (int) - :param query_img [H, W, 3] - :param tracks_2d [N, T, 4] - :param depths [T, H, W] - :param masks [T, H, W] - :param inv_Ks [T, 3, 3] - :param c2ws [T, 4, 4] - returns ( - tracks_3d [N, T, 3] - track_colors [N, 3] - visibles [N, T] - invisibles [N, T] - confidences [N, T] - ) - """ - T, H, W = depths.shape - query_img = query_img[None].permute(0, 3, 1, 2) # (1, 3, H, W) - tracks_2d = tracks_2d.swapaxes(0, 1) # (T, N, 4) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - # (T, N), (T, N), (T, N) - visibles,
invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - track_depths = F.grid_sample( - depths[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths[..., None] - ) - tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[ - ..., :3 - ] - # Filter out out-of-mask tracks. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - is_in_masks = ( - F.grid_sample( - masks[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - )[:, 0, 0] - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - - # valid if in the fg mask at least 40% of the time - # in_mask_counts = is_in_masks.sum(0) - # t = 0.25 - # thresh = min(t * T, in_mask_counts.float().quantile(t).item()) - # valid = in_mask_counts > thresh - valid = is_in_masks[query_index] - # valid if visible 5% of the time - visible_counts = visibles.sum(0) - valid = valid & ( - visible_counts - >= min( - int(0.05 * T), - visible_counts.float().quantile(0.1).item(), - ) - ) - - # Get track's color from the query frame. - # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. 
Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. - If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. 
-def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] - ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/init_utils.py b/som_out/bear/code/2024-10-26-005638/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from 
flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes
-from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks
-
-
-def init_fg_from_tracks_3d(
-    cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor
-) -> GaussianParams:
-    """
-    Using dataclasses instead of individual tensors so we know they're
-    consistent and are always masked/filtered together.
-    """
-    num_fg = tracks_3d.xyz.shape[0]
-
-    # Initialize gaussian colors.
-    colors = torch.logit(tracks_3d.colors)
-    # Initialize gaussian scales: find the average distance to the three
-    # nearest neighbors in the canonical frame for each point and use that
-    # as the scale.
-    dists, _ = knn(tracks_3d.xyz[:, cano_t], 3)
-    dists = torch.from_numpy(dists)
-    scales = dists.mean(dim=-1, keepdim=True)
-    scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95))
-    scales = torch.log(scales.repeat(1, 3))
-    # Initialize gaussian means.
-    means = tracks_3d.xyz[:, cano_t]
-    # Initialize gaussian orientations as random.
-    quats = torch.rand(num_fg, 4)
-    # Initialize gaussian opacities.
-    opacities = torch.logit(torch.full((num_fg,), 0.7))
-    gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs)
-    return gaussians
-
-
-def init_bg(
-    points: StaticObservations,
-) -> GaussianParams:
-    """
-    Using dataclasses instead of individual tensors so we know they're
-    consistent and are always masked/filtered together.
-    """
-    num_init_bg_gaussians = points.xyz.shape[0]
-    bg_scene_center = points.xyz.mean(0)
-    bg_points_centered = points.xyz - bg_scene_center
-    bg_min_scale = bg_points_centered.quantile(0.05, dim=0)
-    bg_max_scale = bg_points_centered.quantile(0.95, dim=0)
-    bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0
-    bkdg_colors = torch.logit(points.colors)
-
-    # Initialize gaussian scales: find the average distance to the three
-    # nearest neighbors for each point and use that as the scale.
-    dists, _ = knn(points.xyz, 3)
-    dists = torch.from_numpy(dists)
-    bg_scales = dists.mean(dim=-1, keepdim=True)
-    bkdg_scales = torch.log(bg_scales.repeat(1, 3))
-
-    bg_means = points.xyz
-
-    # Initialize gaussian orientations from normals.
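-    # Each quaternion rotates the canonical +z axis onto the point's normal:
-    # the axis is z x n, the angle is arccos(z . n), and roll(1) reorders
-    # roma's xyzw output to the wxyz convention used downstream.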
- local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t - 
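-            # Per-point weights combine the Procrustes weights at the canonical
-            # and current frames with the mean of their track confidences; if
-            # the total weight is too small for a stable fit, reuse the
-            # previous timestep's estimate below.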
-            procrustes_weights = (
-                weights[cano_t]
-                * weights[cur_t]
-                * (confidences[cano_t] + confidences[cur_t])
-                / 2
-            )
-            if procrustes_weights.sum() < min_mean_weight * num_frames:
-                init_rots[n, cur_t] = init_rots[n, prev_t]
-                init_ts[n, cur_t] = init_ts[n, prev_t]
-                cluster_skip_ts.append(cur_t)
-            else:
-                se3, (err, err_before) = solve_procrustes(
-                    cluster[cano_t],
-                    cluster[cur_t],
-                    weights=procrustes_weights,
-                    enforce_se3=True,
-                    rot_type=rot_type,
-                )
-                init_rot, init_t, _ = se3
-                assert init_rot.shape[-1] == rot_dim
-                # Resolve the quaternion double cover: pick the sign closer to
-                # the previous timestep's rotation.
-                if rot_type == "quat" and torch.linalg.norm(
-                    init_rot - init_rots[n][prev_t]
-                ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]):
-                    init_rot = -init_rot
-                init_rots[n, cur_t] = init_rot
-                init_ts[n, cur_t] = init_t
-                if np.isnan(err):
-                    print(f"{cur_t=} {err=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                if np.isnan(err_before):
-                    print(f"{cur_t=} {err_before=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                errs_after[n, cur_t] = err
-                errs_before[n, cur_t] = err_before
-                prev_t = cur_t
-        skipped_ts[cluster_id.item()] = cluster_skip_ts
-
-    guru.info(f"{skipped_ts=}")
-    guru.info(
-        "procrustes init median error: {:.5f} => {:.5f}".format(
-            np.median(errs_before[errs_before > 0]),
-            np.median(errs_after[errs_after > 0]),
-        )
-    )
-    guru.info(
-        "procrustes init mean error: {:.5f} => {:.5f}".format(
-            np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0])
-        )
-    )
-    guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}")
-
-    if vis:
-        server = get_server(port)
-        center_idcs = torch.argmin(dists2centers, dim=0)
-        print(f"{dists2centers.shape=} {center_idcs.shape=}")
-        vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs])
-        vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks")
-        import ipdb
-
-        ipdb.set_trace()
-
-    bases = MotionBases(init_rots, init_ts)
-    return bases, motion_coefs, tracks_3d
-
-
-def run_initial_optim(
-    fg: GaussianParams,
-    bases: MotionBases,
-    tracks_3d: TrackObservations,
-    Ks: torch.Tensor,
-    w2cs: torch.Tensor,
-    num_iters: int = 1000,
-    use_depth_range_loss: bool = False,
-):
-    """
-    Jointly refine the motion bases and the foreground gaussians against the
-    observed 3D tracks.
-    :param fg: foreground gaussians with means [num_gaussians, 3]
-    :param bases: motion bases with rots [num_bases, num_frames, 4|6] and
-        transls [num_bases, num_frames, 3]
-    :param tracks_3d: 3D tracks with visibilities and confidences
-    :param Ks: per-frame camera intrinsics
-    :param w2cs: per-frame world-to-camera transforms
-    """
-    optimizer = torch.optim.Adam(
-        [
-            {"params": bases.params["rots"], "lr": 1e-2},
-            {"params": bases.params["transls"], "lr": 3e-2},
-            {"params": fg.params["motion_coefs"], "lr": 1e-2},
-            {"params": fg.params["means"], "lr": 1e-3},
-        ],
-    )
-    scheduler = torch.optim.lr_scheduler.ExponentialLR(
-        optimizer, gamma=0.1 ** (1 / num_iters)
-    )
-    G = fg.params["means"].shape[0]
-    num_frames = bases.num_frames
-    device = bases.params["rots"].device
-
-    w_smooth_func = lambda i, min_v, max_v, th: (
-        min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v
-    )
-
-    gt_2d, gt_depth = project_2d_tracks(
-        tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True
-    )
-    # (G, T, 2)
-    gt_2d = gt_2d.swapaxes(0, 1)
-    # (G, T)
-    gt_depth = gt_depth.swapaxes(0, 1)
-
-    ts = torch.arange(0, num_frames, device=device)
-    ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2)
-    ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1))  # (3B,)
-
-    pbar = tqdm(range(0, num_iters))
-    for i in pbar:
-        coefs = fg.get_coefs()
-        transfms = bases.compute_transforms(ts, coefs)
-        positions = torch.einsum(
-            "pnij,pj->pni",
-            transfms,
-            F.pad(fg.params["means"], (0, 
1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. - w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = 
init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / 
(cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/loss_utils.py b/som_out/bear/code/2024-10-26-005638/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return 
torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - 
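-    # Penalize the motion component along the viewing ray between neighboring
-    # frames so per-point depth stays temporally stable; motion transverse to
-    # the ray is left unconstrained here.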
-    # acc = 2 * means[:, 1] - means[:, 0] - means[:, 2]  # [G, B, 3]
-    # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean()
-    acc_loss = (
-        ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2
-    ).mean() + (
-        ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2
-    ).mean()
-    return acc_loss
-
-
-def compute_se3_smoothness_loss(
-    rots: torch.Tensor,
-    transls: torch.Tensor,
-    weight_rot: float = 1.0,
-    weight_transl: float = 2.0,
-):
-    """
-    Central differences: penalize the second-order differences (acceleration)
-    of the per-basis rotations and translations.
-    :param rots (K, T, 6)
-    :param transls (K, T, 3)
-    """
-    r_accel_loss = compute_accel_loss(rots)
-    t_accel_loss = compute_accel_loss(transls)
-    return r_accel_loss * weight_rot + t_accel_loss * weight_transl
-
-
-def compute_accel_loss(transls):
-    accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:]
-    loss = accel.norm(dim=-1).mean()
-    return loss
-
diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/metrics.py b/som_out/bear/code/2024-10-26-005638/flow3d/metrics.py
deleted file mode 100644
index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000
--- a/som_out/bear/code/2024-10-26-005638/flow3d/metrics.py
+++ /dev/null
@@ -1,313 +0,0 @@
-from typing import Literal
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from torchmetrics.functional.image.lpips import _NoTrainLpips
-from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure
-from torchmetrics.metric import Metric
-from torchmetrics.utilities import dim_zero_cat
-from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE
-
-
-def compute_psnr(
-    preds: torch.Tensor,
-    targets: torch.Tensor,
-    masks: torch.Tensor | None = None,
-) -> float:
-    """
-    Args:
-        preds (torch.Tensor): (..., 3) predicted images in [0, 1].
-        targets (torch.Tensor): (..., 3) target images in [0, 1].
-        masks (torch.Tensor | None): (...,) optional binary masks where the
-            1-regions will be taken into account.
-
-    Returns:
-        psnr (float): Peak signal-to-noise ratio.
-    """
-    if masks is None:
-        masks = torch.ones_like(preds[..., 0])
-    return (
-        -10.0
-        * torch.log(
-            F.mse_loss(
-                preds * masks[..., None],
-                targets * masks[..., None],
-                reduction="sum",
-            )
-            / masks.sum().clamp(min=1.0)
-            / 3.0
-        )
-        / np.log(10.0)
-    ).item()
-
-
-def compute_pose_errors(
-    preds: torch.Tensor, targets: torch.Tensor
-) -> tuple[float, float, float]:
-    """
-    Args:
-        preds: (N, 4, 4) predicted camera poses.
-        targets: (N, 4, 4) target camera poses.
-
-    Returns:
-        ate (float): Absolute trajectory error.
-        rpe_t (float): Relative pose error in translation.
-        rpe_r (float): Relative pose error in rotation (degrees).
-    """
-    # Compute ATE.
-    ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item()
-    # Compute RPE_t and RPE_r.
-    # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r:
-    # torch's acos has numerical issues when its argument is close to 1.0,
-    # i.e. when RPE_r should be very small, which would otherwise produce an
-    # artificially large error.
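-    # RPE compares per-step relative motions: error_rel = inv(target_rel) @ pred_rel,
-    # with the rotation angle recovered from the trace of its rotation block.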
-    preds = preds.detach().cpu().numpy()
-    targets = targets.detach().cpu().numpy()
-    pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:]
-    target_rels = np.linalg.inv(targets[:-1]) @ targets[1:]
-    error_rels = np.linalg.inv(target_rels) @ pred_rels
-    traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1)
-    rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item()
-    rpe_r = (
-        np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item()
-        / np.pi
-        * 180.0
-    )
-    return ate, rpe_t, rpe_r
-
-
-class mPSNR(PeakSignalNoiseRatio):
-    sum_squared_error: list[torch.Tensor]
-    total: list[torch.Tensor]
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            data_range=1.0,
-            base=10.0,
-            dim=None,
-            reduction="elementwise_mean",
-            **kwargs,
-        )
-        self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat")
-        self.add_state("total", default=[], dist_reduce_fx="cat")
-
-    def __len__(self) -> int:
-        return len(self.total)
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (..., 3) float32 predicted images.
-            targets (torch.Tensor): (..., 3) float32 target images.
-            masks (torch.Tensor | None): (...,) optional binary masks where the
-                1-regions will be taken into account.
-        """
-        if masks is None:
-            masks = torch.ones_like(preds[..., 0])
-        self.sum_squared_error.append(
-            torch.sum(torch.pow((preds - targets) * masks[..., None], 2))
-        )
-        self.total.append(masks.sum().to(torch.int64) * 3)
-
-    def compute(self) -> torch.Tensor:
-        """Compute peak signal-to-noise ratio over state."""
-        sum_squared_error = dim_zero_cat(self.sum_squared_error)
-        total = dim_zero_cat(self.total)
-        return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0)
-
-
-class mSSIM(StructuralSimilarityIndexMeasure):
-    similarity: list
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            reduction=None,
-            data_range=1.0,
-            return_full_image=False,
-            **kwargs,
-        )
-        assert isinstance(self.sigma, float)
-
-    def __len__(self) -> int:
-        return sum([s.shape[0] for s in self.similarity])
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (B, H, W, 3) float32 predicted images.
-            targets (torch.Tensor): (B, H, W, 3) float32 target images.
-            masks (torch.Tensor | None): (B, H, W) optional binary masks where
-                the 1-regions will be taken into account.
-        """
-        if masks is None:
-            masks = torch.ones_like(preds[..., 0])
-
-        # Construct a 1D Gaussian blur filter.
-        assert isinstance(self.kernel_size, int)
-        hw = self.kernel_size // 2
-        shift = (2 * hw - self.kernel_size + 1) / 2
-        assert isinstance(self.sigma, float)
-        f_i = (
-            (torch.arange(self.kernel_size, device=preds.device) - hw + shift)
-            / self.sigma
-        ) ** 2
-        filt = torch.exp(-0.5 * f_i)
-        filt /= torch.sum(filt)
-
-        # Blur in x and y (faster than the 2D convolution).
-        def convolve2d(z, m, f):
-            # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf).
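-            # Masked, normalized filtering: blur z*m and m with matching
-            # kernels, then rescale by the valid coverage so partially masked
-            # windows are not attenuated; also return which outputs saw any
-            # valid pixels.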
- z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. - # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. 
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. - """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/params.py b/som_out/bear/code/2024-10-26-005638/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if 
f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] 
== 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/renderer.py b/som_out/bear/code/2024-10-26-005638/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. - work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/scene_model.py b/som_out/bear/code/2024-10-26-005638/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def 
__init__(
-        self,
-        Ks: Tensor,
-        w2cs: Tensor,
-        fg_params: GaussianParams,
-        motion_bases: MotionBases,
-        bg_params: GaussianParams | None = None,
-    ):
-        super().__init__()
-        self.num_frames = motion_bases.num_frames
-        self.fg = fg_params
-        self.motion_bases = motion_bases
-        self.bg = bg_params
-        scene_scale = 1.0 if bg_params is None else bg_params.scene_scale
-        self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale))
-        self.register_buffer("Ks", Ks)
-        self.register_buffer("w2cs", w2cs)
-
-        self._current_xys = None
-        self._current_radii = None
-        self._current_img_wh = None
-
-    @property
-    def num_gaussians(self) -> int:
-        return self.num_bg_gaussians + self.num_fg_gaussians
-
-    @property
-    def num_bg_gaussians(self) -> int:
-        return self.bg.num_gaussians if self.bg is not None else 0
-
-    @property
-    def num_fg_gaussians(self) -> int:
-        return self.fg.num_gaussians
-
-    @property
-    def num_motion_bases(self) -> int:
-        return self.motion_bases.num_bases
-
-    @property
-    def has_bg(self) -> bool:
-        return self.bg is not None
-
-    def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]:
-        """
-        Returns:
-            means: (G, 3)
-            quats: (G, 4)
-        """
-        assert self.bg is not None
-        return self.bg.params["means"], self.bg.get_quats()
-
-    def compute_transforms(
-        self, ts: torch.Tensor, inds: torch.Tensor | None = None
-    ) -> torch.Tensor:
-        coefs = self.fg.get_coefs()  # (G, K)
-        if inds is not None:
-            coefs = coefs[inds]
-        transfms = self.motion_bases.compute_transforms(ts, coefs)  # (G, B, 3, 4)
-        return transfms
-
-    def compute_poses_fg(
-        self, ts: torch.Tensor | None, inds: torch.Tensor | None = None
-    ) -> tuple[torch.Tensor, torch.Tensor]:
-        """
-        :returns means: (G, B, 3), quats: (G, B, 4)
-        """
-        means = self.fg.params["means"]  # (G, 3)
-        quats = self.fg.get_quats()  # (G, 4)
-        if inds is not None:
-            means = means[inds]
-            quats = quats[inds]
-        if ts is not None:
-            transfms = self.compute_transforms(ts, inds)  # (G, B, 3, 4)
-            means = torch.einsum(
-                "pnij,pj->pni",
-                transfms,
-                F.pad(means, (0, 1), value=1.0),
-            )
-            quats = roma.quat_xyzw_to_wxyz(
-                (
-                    roma.quat_product(
-                        roma.rotmat_to_unitquat(transfms[..., :3, :3]),
-                        roma.quat_wxyz_to_xyzw(quats[:, None]),
-                    )
-                )
-            )
-            quats = F.normalize(quats, p=2, dim=-1)
-        else:
-            means = means[:, None]
-            quats = quats[:, None]
-        return means, quats
-
-    def compute_poses_all(
-        self, ts: torch.Tensor | None
-    ) -> tuple[torch.Tensor, torch.Tensor]:
-        means, quats = self.compute_poses_fg(ts)
-        if self.has_bg:
-            bg_means, bg_quats = self.compute_poses_bg()
-            means = torch.cat(
-                [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0
-            ).contiguous()
-            quats = torch.cat(
-                [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0
-            ).contiguous()
-        return means, quats
-
-    def get_colors_all(self) -> torch.Tensor:
-        colors = self.fg.get_colors()
-        if self.bg is not None:
-            colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous()
-        return colors
-
-    def get_scales_all(self) -> torch.Tensor:
-        scales = self.fg.get_scales()
-        if self.bg is not None:
-            scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous()
-        return scales
-
-    def get_opacities_all(self) -> torch.Tensor:
-        """
-        :returns opacities: (G,)
-        """
-        opacities = self.fg.get_opacities()
-        if self.bg is not None:
-            opacities = torch.cat(
-                [opacities, self.bg.get_opacities()], dim=0
-            ).contiguous()
-        return opacities
-
-    @staticmethod
-    def init_from_state_dict(state_dict, prefix=""):
-        fg = 
GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). - target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = 
colors_override[filter_mask]
-
-        render_colors, alphas, info = rasterization(
-            means=means,
-            quats=quats,
-            scales=scales,
-            opacities=opacities,
-            colors=colors_override,
-            backgrounds=bg_color,
-            viewmats=w2cs,  # [C, 4, 4]
-            Ks=Ks,  # [C, 3, 3]
-            width=W,
-            height=H,
-            packed=False,
-            render_mode=mode,
-        )
-
-        # Populate the current data for adaptive gaussian control.
-        if self.training and info["means2d"].requires_grad:
-            self._current_xys = info["means2d"]
-            self._current_radii = info["radii"]
-            self._current_img_wh = img_wh
-            # We want to be able to access the xys' gradients later in a
-            # torch.no_grad context.
-            self._current_xys.retain_grad()
-
-        assert render_colors.shape[-1] == sum(ds_expected.values())
-        outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1)
-        out_dict = {}
-        for i, (name, dim) in enumerate(ds_expected.items()):
-            x = outputs[i]
-            assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}"
-            if name == "tracks_3d":
-                x = x.reshape(C, H, W, B, 3)
-            out_dict[name] = x
-        out_dict["acc"] = alphas
-        return out_dict
diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/tensor_dataclass.py b/som_out/bear/code/2024-10-26-005638/flow3d/tensor_dataclass.py
deleted file mode 100644
index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000
--- a/som_out/bear/code/2024-10-26-005638/flow3d/tensor_dataclass.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from dataclasses import dataclass
-from typing import Callable, TypeVar
-
-import torch
-from typing_extensions import Self
-
-TensorDataclassT = TypeVar("TensorDataclassT", bound="TensorDataclass")
-
-
-class TensorDataclass:
-    """A lighter version of nerfstudio's TensorDataclass:
-    https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py
-    """
-
-    def __getitem__(self, key) -> Self:
-        return self.map(lambda x: x[key])
-
-    def to(self, device: torch.device | str) -> Self:
-        """Move the tensors in the dataclass to the given device.
-
-        Args:
-            device: The device to move to.
-
-        Returns:
-            A new dataclass.
-        """
-        return self.map(lambda x: x.to(device))
-
-    def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self:
-        """Apply a function to all tensors in the dataclass.
-
-        Also recurses into lists, tuples, and dictionaries.
-
-        Args:
-            fn: The function to apply to each tensor.
-
-        Returns:
-            A new dataclass.
-        """
-
-        MapT = TypeVar("MapT")
-
-        def _map_impl(
-            fn: Callable[[torch.Tensor], torch.Tensor],
-            val: MapT,
-        ) -> MapT:
-            if isinstance(val, torch.Tensor):
-                return fn(val)
-            elif isinstance(val, TensorDataclass):
-                return type(val)(**_map_impl(fn, vars(val)))
-            elif isinstance(val, (list, tuple)):
-                return type(val)(_map_impl(fn, v) for v in val)
-            elif isinstance(val, dict):
-                assert type(val) is dict  # No subclass support.
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/trainer.py b/som_out/bear/code/2024-10-26-005638/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # (B * N,). - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3).
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
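- # Accumulated since the last control step: xys_grad_norm_acc sums per-gaussian - # screen-space gradient norms, vis_count counts the views in which each gaussian - # was visible, and max_radii tracks the largest normalized screen radius observed.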
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - # Use the in-place index_put_; the out-of-place index_put would discard the update. - self.running_stats["max_radii"].index_put_((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians}
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/trajectories.py b/som_out/bear/code/2024-10-26-005638/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1) - y = torch.cross(z, x, dim=-1) # (3) - avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center) - avg_w2c = torch.linalg.inv(avg_c2w) - return avg_w2c - - -def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor: - """Triangulate a set of rays to find a single lookat point. - - Args: - origins (torch.Tensor): A (N, 3) array of ray origins. - viewdirs (torch.Tensor): A (N, 3) array of ray view directions. - - Returns: - torch.Tensor: A (3,) lookat point. - """ - - viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1) - eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None] - # Calculate projection matrix I - rr^T - I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :]) - # Compute sum of projections - sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3) - # Solve for the intersection point using least squares - lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0] - # Check NaNs. - assert not torch.any(torch.isnan(lookat)) - return lookat - - -def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor): - """ - Args: - positions: (N, 3) tensor of camera positions - lookat: (3,) tensor of lookat point - up: (3,) tensor of up vector - - Returns: - w2cs: (N, 4, 4) tensor of world-to-camera transforms - """ - forward_vectors = F.normalize(lookat - positions, dim=-1) - right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1) - down_vectors = F.normalize( - torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1 - ) - Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1) - w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions)) - return w2cs - - -def get_arc_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_position = torch.linalg.inv(ref_w2c)[:3, 3] - thetas = ( - torch.sin( - torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[ - :-1 - ] - ) - * (degree / 2.0) - / 180.0 - * torch.pi - ) - positions = torch.einsum( - "nij,j->ni", - roma.rotvec_to_rotmat(thetas[:, None] * up[None]), - ref_position - lookat, - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_lemniscate_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi) - # Lemniscate curve in camera space. Starting at the origin. - thetas = ( - torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1] - + torch.pi / 2 - ) - positions = torch.stack( - [ - a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2), - a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2), - torch.zeros(num_frames, device=ref_w2c.device), - ], - dim=-1, - ) - # Transform to world space.
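- # Append a homogeneous 1 to each position and apply the top 3x4 rows of ref_c2w.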
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/transforms.py b/som_out/bear/code/2024-10-26-005638/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
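- Optional per-element scale; when given, the bottom row stores 1 / s, - so the matrix maps a homogeneous point x to s * (R @ x + t) after the - perspective divide.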
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transform to be SE3. - rot_type (str): Parameterization of the returned rotation, one of - "quat", "mat", or "6d". - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - rot (torch.Tensor): rotation component, a (4,) quaternion in WXYZ - format by default, or a rotation matrix / 6D rotation - depending on rot_type. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (tuple[float, float]): weighted L2 distance after and before - alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error.
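- # Apply the recovered similarity transform to src and report the weighted - # L2 residual against dst, next to the residual before alignment.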
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/validator.py b/som_out/bear/code/2024-10-26-005638/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
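- # The PCK threshold used here is 5% of the longer image side.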
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Move results to the CPU, since keeping them on the GPU would otherwise - # consume an unnecessarily large amount of memory for long sequences.
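- # Each video frame stacks the ground-truth image and the rendering side by side.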
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Move results to the CPU, since keeping them on the GPU would otherwise - # consume an unnecessarily large amount of memory for long sequences. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # ().
- t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors - ) - # Move results to the CPU, since keeping them on the GPU would otherwise - # consume an unnecessarily large amount of memory for long sequences. - video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "motion_coefs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/vis/__init__.py b/som_out/bear/code/2024-10-26-005638/flow3d/vis/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/vis/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc deleted file mode 100644 index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/vis/__pycache__/render_panel.cpython-310.pyc deleted file mode 100644 index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/vis/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/bear/code/2024-10-26-005638/flow3d/vis/__pycache__/viewer.cpython-310.pyc deleted file mode 100644 index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005638/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/vis/playback_panel.py b/som_out/bear/code/2024-10-26-005638/flow3d/vis/playback_panel.py deleted file mode 100644 index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/vis/playback_panel.py +++ /dev/null @@ -1,68 +0,0 @@ -import threading -import time - -import viser - - -def add_gui_playback_group( - server: viser.ViserServer, - num_frames: int, - min_fps: float = 1.0, - max_fps: float = 60.0, - fps_step: float = 0.1, - initial_fps: float = 10.0, -): -
gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/vis/render_panel.py b/som_out/bear/code/2024-10-26-005638/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
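- # Camera-path render panel for viser: keyframed cameras, Kochanek-Bartels - # splines for position / orientation / FOV, and per-keyframe transition timing.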
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
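- # Keyframe indices come from a monotonically increasing counter, so frustum - # handles keep stable scene names across edits.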
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
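- # The fly-to animation interpolates along the SE(3) geodesic in 10 steps - # at roughly 30 FPS.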
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
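- # PCHIP interpolation is monotone, so the seconds-to-spline-index mapping - # never overshoots a keyframe; the clip only guards the endpoints.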
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
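For reference, a self-contained sketch of the Kochanek-Bartels evaluation used for the position, FOV, and time splines above, with made-up keyframe positions (the `splines` package is the one this module already imports):

```python
import numpy as np
import splines

positions = [
    np.array([0.0, 0.0, 0.0]),
    np.array([1.0, 0.0, 0.0]),
    np.array([1.0, 1.0, 0.0]),
]
position_spline = splines.KochanekBartels(
    positions, tcb=(0.0, 0.0, 0.0), endconditions="natural"
)
# The spline parameter advances by 1 per keyframe, which is why
# spline_t_from_t_sec maps seconds onto keyframe indices.
print(position_spline.evaluate(0.5))  # between the first two keyframes
print(position_spline.evaluate(2.0))  # exactly the last keyframe
```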
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
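A toy check of the duration bookkeeping in `compute_duration` and `compute_transition_times_cumsum` above, with assumed values: every keyframe after the first contributes either its per-keyframe override or the default transition time (when looping, the first keyframe contributes a closing segment as well):

```python
default_transition_sec = 2.0
# Hypothetical per-keyframe overrides (None = use the default), no loop.
override_transition_sec = [None, 0.5, None]
duration = sum(
    o if o is not None else default_transition_sec
    for o in override_transition_sec[1:]  # keyframe 0 starts the path
)
print(duration)  # 2.5
```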
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
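`_make_transform_controls_callback` above exists to bind each keyframe to its own handler: without the wrapper function, Python's late-binding closures would leave every callback pointing at the last loop iteration. A standalone illustration of the pitfall and the fix:

```python
def make_callback(index: int):
    # Passing `index` as an argument freezes its value per callback.
    def callback() -> None:
        print("keyframe", index)
    return callback

late_bound = [lambda: print("keyframe", i) for i in range(3)]
well_bound = [make_callback(i) for i in range(3)]

for cb in late_bound:
    cb()  # prints "keyframe 2" three times: `i` is looked up at call time
for cb in well_bound:
    cb()  # prints "keyframe 0", "keyframe 1", "keyframe 2"
```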
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
- Returns current camera pose + FOV if available.""" - - if preview_frame_slider is None: - return - maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad( - preview_frame_slider.value / get_max_frame_index() - ) - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - render_tab_state.preview_fov = fov_rad - render_tab_state.preview_aspect = camera_path.get_aspect() - render_tab_state.preview_camera_type = camera_type.value - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - return pose, fov_rad, time - - def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]: - """Helper for creating the current frame # slider. This is removed and - re-added anytime the `max` value changes.""" - - with playback_folder: - preview_frame_slider = server.gui.add_slider( - "Preview frame", - min=0, - max=get_max_frame_index(), - step=1, - initial_value=0, - # Place right after the pause button. - order=preview_render_stop_button.order + 0.01, - disabled=get_max_frame_index() == 1, - ) - play_button.disabled = preview_frame_slider.disabled - preview_render_button.disabled = preview_frame_slider.disabled - - @preview_frame_slider.on_update - def _(_) -> None: - nonlocal preview_camera_handle - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - - preview_camera_handle = server.scene.add_camera_frustum( - "/preview_camera", - fov=fov_rad, - aspect=resolution.value[0] / resolution.value[1], - scale=0.35, - wxyz=pose.rotation().wxyz, - position=pose.translation(), - color=(10, 200, 30), - ) - if render_tab_state.preview_render: - for client in server.get_clients().values(): - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - return preview_frame_slider - - # Back up the camera poses before previewing renders; they are restored when the preview stops. - camera_pose_backup_from_id: Dict[int, tuple] = {} - - @preview_render_button.on_click - def _(_) -> None: - render_tab_state.preview_render = True - preview_render_button.visible = False - preview_render_stop_button.visible = True - - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov, time = maybe_pose_and_fov_rad_and_time - del fov - - # Hide all scene nodes when we're previewing the render. - server.scene.set_global_visibility(False) - - # Back up and then set camera poses. - for client in server.get_clients().values(): - camera_pose_backup_from_id[client.client_id] = ( - client.camera.position, - client.camera.look_at, - client.camera.up_direction, - ) - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - @preview_render_stop_button.on_click - def _(_) -> None: - render_tab_state.preview_render = False - preview_render_button.visible = True - preview_render_stop_button.visible = False - - # Revert camera poses. 
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera()  # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play).start() - - # Pause the camera trajectory when the pause button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # Add a button for loading an existing path. - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - camera_path_dir = datapath - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # Load the json file. - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # Apply a 180 degree rotation about the x axis. - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_fov values will not be exactly matched. 
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/vis/utils.py b/som_out/bear/code/2024-10-26-005638/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, occ, radius=radius) 
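A small numeric check of the pinhole projection in `project_2d_tracks` above, using one frame, one point, identity extrinsics, and toy intrinsics (all values assumed):

```python
import torch
import torch.nn.functional as F

Ks = torch.tensor([[[100.0, 0.0, 50.0],
                    [0.0, 100.0, 50.0],
                    [0.0, 0.0, 1.0]]])           # (T=1, 3, 3)
T_cw = torch.eye(4)[None]                        # (T=1, 4, 4)
tracks_3d_w = torch.tensor([[[0.5, 0.0, 2.0]]])  # (T=1, N=1, 3)

tracks_3d_c = torch.einsum(
    "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1)
)[..., :3]
tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c)
# u = fx * x / z + cx = 100 * 0.25 + 50 = 75, v = cy = 50.
print(tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5))
```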
- out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/bear/code/2024-10-26-005638/flow3d/vis/viewer.py b/som_out/bear/code/2024-10-26-005638/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/bear/code/2024-10-26-005638/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/bear/code/2024-10-26-005638/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/bear/code/2024-10-26-005638/scripts/evaluate_iphone.py b/som_out/bear/code/2024-10-26-005638/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005638/scripts/evaluate_iphone.py +++ /dev/null @@ 
-1,447 +0,0 @@ -import argparse -import json -import os.path as osp -from glob import glob -from itertools import product - -import cv2 -import imageio.v3 as iio -import numpy as np -import roma -import torch -from tqdm import tqdm - -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.metrics import mLPIPS, mPSNR, mSSIM -from flow3d.transforms import rt_to_mat4, solve_procrustes - -parser = argparse.ArgumentParser() -parser.add_argument( - "--data_dir", - type=str, - help="Path to the data directory that contains all the sequences.", -) -parser.add_argument( - "--result_dir", - type=str, - help="Path to the result directory that contains the results." - "for batch evaluation, result_dir should contain subdirectories for each sequence. (result_dir/seq_name/results)" - "for single sequence evaluation, result_dir should contain results directly (result_dir/results)", -) -parser.add_argument( - "--seq_names", - type=str, - nargs="+", - default=[ - "apple", - "backpack", - "block", - "creeper", - "handwavy", - "haru-sit", - "mochi-high-five", - "paper-windmill", - "pillow", - "spin", - "sriracha-tree", - "teddy", - ], - help="Sequence names to evaluate.", -) -args = parser.parse_args() - - -def load_data_dict(data_dir, train_names, val_names): - val_imgs = np.array( - [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names] - ) - val_covisibles = np.array( - [ - iio.imread( - osp.join( - data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png" - ) - ) - for name in tqdm(val_names, desc="Loading val covisibles") - ] - ) - train_depths = np.array( - [ - np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0] - for name in train_names - ] - ) - train_Ks, train_w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [name + ".png" for name in train_names], - ) - train_Ks = train_Ks[:, :3, :3] - scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item() - train_c2ws = np.linalg.inv(train_w2cs) - train_c2ws[:, :3, -1] *= scale - train_w2cs = np.linalg.inv(train_c2ws) - keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json"))) - keypoints_2d = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints_2d.append(json.load(f)) - keypoints_2d = np.array(keypoints_2d) - keypoints_2d[..., :2] *= 2.0 - time_ids = np.array( - [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths] - ) - time_pairs = np.array(list(product(time_ids, repeat=2))) - index_pairs = np.array(list(product(range(len(time_ids)), repeat=2))) - keypoints_3d = [] - for i, kps_2d in zip(time_ids, keypoints_2d): - K = train_Ks[i] - w2c = train_w2cs[i] - depth = train_depths[i] - is_kp_visible = kps_2d[:, 2] == 1 - is_depth_valid = ( - cv2.remap( - (depth != 0).astype(np.float32), - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - == 1 - ) - kp_depths = cv2.remap( - depth, # type: ignore - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - kps_3d = ( - np.einsum( - "ij,pj->pi", - np.linalg.inv(K), - np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1), - ) - * kp_depths[:, None] - ) - kps_3d = np.einsum( - "ij,pj->pi", - np.linalg.inv(w2c)[:3], - np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1), - ) - kps_3d = np.concatenate( - [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1 - ) - 
kps_3d[kps_3d[:, -1] != 1] = 0.0 - keypoints_3d.append(kps_3d) - keypoints_3d = np.array(keypoints_3d) - return { - "val_imgs": val_imgs, - "val_covisibles": val_covisibles, - "train_depths": train_depths, - "train_Ks": train_Ks, - "train_w2cs": train_w2cs, - "keypoints_2d": keypoints_2d, - "keypoints_3d": keypoints_3d, - "time_ids": time_ids, - "time_pairs": time_pairs, - "index_pairs": index_pairs, - } - - -def load_result_dict(result_dir, val_names): - try: - pred_val_imgs = np.array( - [ - iio.imread(osp.join(result_dir, "rgb", f"{name}.png")) - for name in val_names - ] - ) - except Exception: - pred_val_imgs = None - try: - keypoints_dict = np.load( - osp.join(result_dir, "keypoints.npz"), allow_pickle=True - ) - if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict: - keypoints_dict = keypoints_dict["arr_0"].item() - pred_keypoint_Ks = keypoints_dict["Ks"] - pred_keypoint_w2cs = keypoints_dict["w2cs"] - pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"] - pred_train_depths = keypoints_dict["pred_train_depths"] - except Exception: - print( - "No keypoints.npz found; this is only expected if the method itself cannot produce keypoints." - ) - keypoints_dict = {} - pred_keypoint_Ks = None - pred_keypoint_w2cs = None - pred_keypoints_3d = None - pred_train_depths = None - - if "visibilities" in list(keypoints_dict.keys()): - pred_visibilities = keypoints_dict["visibilities"] - else: - pred_visibilities = None - - return { - "pred_val_imgs": pred_val_imgs, - "pred_train_depths": pred_train_depths, - "pred_keypoint_Ks": pred_keypoint_Ks, - "pred_keypoint_w2cs": pred_keypoint_w2cs, - "pred_keypoints_3d": pred_keypoints_3d, - "pred_visibilities": pred_visibilities, - } - - -def evaluate_3d_tracking(data_dict, result_dict): - train_Ks = data_dict["train_Ks"] - train_w2cs = data_dict["train_w2cs"] - keypoints_3d = data_dict["keypoints_3d"] - time_ids = data_dict["time_ids"] - time_pairs = data_dict["time_pairs"] - index_pairs = data_dict["index_pairs"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"] - if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks): - print("Inconsistent camera intrinsics.") - print(train_Ks[time_ids][0], pred_keypoint_Ks[0]) - keypoint_w2cs = train_w2cs[time_ids] - q, t, s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0] - R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1)) - pred_keypoints_3d = np.einsum( - "ij,...j->...i", - rt_to_mat4(R, t, s).numpy().astype(np.float64), - np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1), - ) - pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:] - # Compute 3D tracking metrics. 
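The alignment step above applies a similarity transform as a single 4x4 matrix acting on homogeneous coordinates. A toy version of that pattern with assumed R, t, s (an illustration of the einsum, not a claim about `rt_to_mat4`'s exact convention):

```python
import numpy as np

R = np.eye(3)                  # rotation (assumed identity here)
t = np.array([1.0, 0.0, 0.0])  # translation
s = 2.0                        # uniform scale

M = np.eye(4)
M[:3, :3] = s * R
M[:3, 3] = t

pts = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
pts_h = np.einsum(
    "ij,...j->...i", M, np.pad(pts, ((0, 0), (0, 1)), constant_values=1)
)
print(pts_h[..., :3] / pts_h[..., 3:])  # [[1, 0, 0], [3, 2, 2]]
```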
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
None].repeat(num_pts, axis=-1) - evaluation_points = one_hot_eye == 0 - for i in range(num_frames): - evaluation_points[i, :, ~visibilities[i]] = False - occ_acc = np.sum( - np.equal(pred_visibilities, target_visibilities) & evaluation_points - ) / np.sum(evaluation_points) - all_frac_within = [] - all_jaccard = [] - - for thresh in [4, 8, 16, 32, 64]: - within_dist = np.sum( - np.square(pred_points - target_points), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, target_visibilities) - count_correct = np.sum(is_correct & evaluation_points) - count_visible_points = np.sum(target_visibilities & evaluation_points) - frac_correct = count_correct / count_visible_points - all_frac_within.append(frac_correct) - - true_positives = np.sum(is_correct & pred_visibilities & evaluation_points) - gt_positives = np.sum(target_visibilities & evaluation_points) - false_positives = (~target_visibilities) & pred_visibilities - false_positives = false_positives | ((~within_dist) & pred_visibilities) - false_positives = np.sum(false_positives & evaluation_points) - jaccard = true_positives / (gt_positives + false_positives) - all_jaccard.append(jaccard) - AJ = np.mean(all_jaccard) - APCK = np.mean(all_frac_within) - - print(f"2D tracking AJ: {AJ:.4f}") - print(f"2D tracking avg PCK: {APCK:.4f}") - print(f"2D tracking occlusion accuracy: {occ_acc:.4f}") - print("-----------------------------") - return AJ, APCK, occ_acc - - -def evaluate_nv(data_dict, result_dict): - device = "cuda" - psnr_metric = mPSNR().to(device) - ssim_metric = mSSIM().to(device) - lpips_metric = mLPIPS().to(device) - - val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device) - val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device) - pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device) - - for i in range(len(val_imgs)): - val_img = val_imgs[i] / 255.0 - pred_val_img = pred_val_imgs[i] / 255.0 - val_covisible = val_covisibles[i] / 255.0 - psnr_metric.update(val_img, pred_val_img, val_covisible) - ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - mpsnr = psnr_metric.compute().item() - mssim = ssim_metric.compute().item() - mlpips = lpips_metric.compute().item() - print(f"NV mPSNR: {mpsnr:.4f}") - print(f"NV mSSIM: {mssim:.4f}") - print(f"NV mLPIPS: {mlpips:.4f}") - return mpsnr, mssim, mlpips - - -if __name__ == "__main__": - seq_names = args.seq_names - - epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], [] - AJ_all, APCK_all, occ_acc_all = [], [], [] - mpsnr_all, mssim_all, mlpips_all = [], [], [] - - for seq_name in seq_names: - print("=========================================") - print(f"Evaluating {seq_name}") - print("=========================================") - data_dir = osp.join(args.data_dir, seq_name) - if not osp.exists(data_dir): - data_dir = args.data_dir - if not osp.exists(data_dir): - raise ValueError(f"Data directory {data_dir} not found.") - result_dir = osp.join(args.result_dir, seq_name, "results/") - if not osp.exists(result_dir): - result_dir = osp.join(args.result_dir, "results/") - if not osp.exists(result_dir): - raise ValueError(f"Result directory {result_dir} not found.") - - with open(osp.join(data_dir, "splits/train.json")) as f: - train_names = json.load(f)["frame_names"] - with open(osp.join(data_dir, "splits/val.json")) as f: - val_names = json.load(f)["frame_names"] - - data_dict = load_data_dict(data_dir, train_names, 
val_names) - result_dict = load_result_dict(result_dir, val_names) - if result_dict["pred_keypoints_3d"] is not None: - epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict) - AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict) - epe_all.append(epe) - pck_3d_10cm_all.append(pck_3d_10cm) - pck_3d_5cm_all.append(pck_3d_5cm) - AJ_all.append(AJ) - APCK_all.append(APCK) - occ_acc_all.append(occ_acc) - if len(data_dict["val_imgs"]) > 0: - if result_dict["pred_val_imgs"] is None: - print("No NV results found.") - continue - mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict) - mpsnr_all.append(mpsnr) - mssim_all.append(mssim) - mlpips_all.append(mlpips) - - print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}") - print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}") - print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}") - print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}") - print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}") - print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}") - print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}") - print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}") - print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}") diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/__init__.py b/som_out/bear/code/2024-10-26-005721/flow3d/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/configs.cpython-310.pyc deleted file mode 100644 index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/init_utils.cpython-310.pyc deleted file mode 100644 index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/loss_utils.cpython-310.pyc deleted file mode 100644 index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/metrics.cpython-310.pyc deleted file mode 100644 index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ diff --git 
a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/params.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/configs.py b/som_out/bear/code/2024-10-26-005721/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig - motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float = 
0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/data/__init__.py b/som_out/bear/code/2024-10-26-005721/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 index 0e12be124c07bc40149b0b8bcafd1413e94f8fd0..0000000000000000000000000000000000000000 Binary files 
a/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/colmap.cpython-310.pyc deleted file mode 100644 index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc deleted file mode 100644 index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/data/base_dataset.py b/som_out/bear/code/2024-10-26-005721/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... - - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ... 
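# --- illustrative sketch (editor's addition, not part of the deleted file) ---
# A minimal in-memory subclass of the BaseDataset interface above, showing
# the tensor shapes its docstrings promise. Every value is a random
# placeholder; only the interface contract is the point.
import torch

class _ToyDataset(BaseDataset):
    def __init__(self, num_frames: int = 4, h: int = 48, w: int = 64):
        super().__init__()
        self._n, self._h, self._w = num_frames, h, w

    @property
    def num_frames(self) -> int:
        return self._n

    def get_w2cs(self) -> torch.Tensor:  # (T, 4, 4)
        return torch.eye(4).expand(self._n, 4, 4).clone()

    def get_Ks(self) -> torch.Tensor:  # (T, 3, 3)
        return torch.eye(3).expand(self._n, 3, 3).clone()

    def get_image(self, index: int) -> torch.Tensor:  # (H, W, 3)
        return torch.rand(self._h, self._w, 3)

    def get_depth(self, index: int) -> torch.Tensor:  # (H, W)
        return torch.ones(self._h, self._w)

    def get_mask(self, index: int) -> torch.Tensor:  # (H, W)
        return torch.ones(self._h, self._w)

    def get_tracks_3d(self, num_samples: int, **kwargs):
        n, t = num_samples, self._n
        visibles = torch.rand(n, t) > 0.2
        return (
            torch.randn(n, t, 3),         # coordinates (N, T, 3)
            visibles,                     # visibles    (N, T)
            ~visibles,                    # invisibles  (N, T)
            torch.ones(n, t),             # confidences (N, T)
            torch.rand(n, 3),             # colors      (N, 3)
        )

    def get_bkgd_points(self, num_samples: int, **kwargs):
        return (
            torch.randn(num_samples, 3),  # coordinates (N, 3)
            torch.randn(num_samples, 3),  # normals     (N, 3)
            torch.rand(num_samples, 3),   # colors      (N, 3)
        )
# --- end sketch; the original file continues below ---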
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/data/casual_dataset.py b/som_out/bear/code/2024-10-26-005721/flow3d/data/casual_dataset.py deleted file mode 100644 index e378c6d2408c8ae5d2cdaf45cf6f77dead5c7bd2..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything" #"aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything", #"aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = 
depth_type - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return self.depths[index] / 
self.scale - - def load_image(self, index) -> torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, dtype=torch.float32), - 
torch.arange(0, H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/data/colmap.py b/som_out/bear/code/2024-10-26-005721/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = {v.name: k for k, v in 
images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
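Example (editor's illustration): in this file, read_next_bytes(fid, 8, "Q")
unpacks the little-endian uint64 entity count at the head of cameras.bin,
and read_next_bytes(fid, 24, "iiQQ") unpacks one camera header as
(camera_id, model_id, width, height).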
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/data/iphone_dataset.py b/som_out/bear/code/2024-10-26-005721/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.scene_norm_dict = 
scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
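# --- illustrative sketch (editor's addition, not part of the deleted file) ---
# The nested loader just below reads predictions stored as inverse depth
# (disparity), floors them at 1e-3, and inverts; the floor bounds the
# maximum recoverable depth at 1000 units. A standalone numpy version of
# that conversion, assuming the same 1e-3 floor:
import numpy as np

def disparity_to_depth(disp: np.ndarray, floor: float = 1e-3) -> np.ndarray:
    # Clamp tiny or negative disparities so 1 / disp stays finite.
    return 1.0 / np.maximum(disp, floor)

# disparity_to_depth(np.array([0.0, 0.5, 2.0])) -> [1000., 2., 0.5]
# --- end sketch; the original __init__ continues below ---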
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
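# --- illustrative sketch (editor's addition, not part of the deleted file) ---
# The branch below centers the scene at the mean of the foreground tracks,
# takes half of the largest 5%-95% quantile extent as the scale, and rotates
# the mean camera "up" direction onto world +z. A self-contained version of
# the same recipe, with roma.rotvec_to_rotmat and rt_to_mat4 replaced by an
# explicit Rodrigues rotation (assumes up is not already parallel to +z):
import torch
import torch.nn.functional as F

def sketch_scene_norm(points: torch.Tensor, w2cs: torch.Tensor):
    """points: (P, 3) foreground points; w2cs: (T, 4, 4) world-to-camera."""
    center = points.mean(dim=0)
    centered = points - center
    extent = centered.quantile(0.95, dim=0) - centered.quantile(0.05, dim=0)
    scale = extent.max().item() / 2.0
    # World-space camera up is minus the mean y-row of the w2c rotations.
    up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1)
    target = torch.tensor([0.0, 0.0, 1.0])
    axis = F.normalize(torch.linalg.cross(up, target), dim=-1)
    angle = torch.arccos(up.dot(target).clamp(-1.0, 1.0))
    ax, ay, az = axis.tolist()
    K = torch.tensor([[0.0, -az, ay], [az, 0.0, -ax], [-ay, ax, 0.0]])
    # Rodrigues: R = I + sin(a) K + (1 - cos(a)) K^2 rotates up onto target.
    R = torch.eye(3) + torch.sin(angle) * K + (1 - torch.cos(angle)) * (K @ K)
    transfm = torch.eye(4)
    transfm[:3, :3] = R
    transfm[:3, 3] = -R @ center
    return scale, transfm
# --- end sketch; the original normalization code follows ---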
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly the same number of - samples from each frame. Note that this might result in - fewer samples than specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "get_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks.
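# --- illustrative sketch (editor's addition, not part of the deleted file) ---
# The loop below spreads num_samples across the subsampled query frames:
# every frame gets floor(num_samples / num_frames) tracks and the last frame
# absorbs the remainder, so the budgets always sum to num_samples exactly.
def sketch_per_frame_budget(num_samples: int, num_frames: int) -> list[int]:
    base = num_samples // num_frames
    return [base] * (num_frames - 1) + [num_samples - base * (num_frames - 1)]

assert sum(sketch_per_frame_budget(10000, 7)) == 10000
# sketch_per_frame_budget(10, 3) -> [3, 3, 4]
# --- end sketch; the original loading loop follows ---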
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
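# --- illustrative sketch (editor's addition, not part of the deleted file) ---
# The grid_sample call below does a bilinear color lookup at floating-point
# pixel positions; grid_sample expects coordinates in [-1, 1], which is what
# normalize_coords is assumed to produce (the inline mapping here mirrors
# that assumption for align_corners=True):
import torch
import torch.nn.functional as F

def sketch_sample_colors(img: torch.Tensor, xy: torch.Tensor) -> torch.Tensor:
    """img: (H, W, 3) in [0, 1]; xy: (P, 2) pixel coords. Returns (P, 3)."""
    H, W = img.shape[:2]
    grid = xy.clone()
    grid[:, 0] = 2.0 * xy[:, 0] / max(W - 1, 1) - 1.0
    grid[:, 1] = 2.0 * xy[:, 1] / max(H - 1, 1) - 1.0
    # (1, 3, H, W) sampled at a (1, P, 1, 2) grid -> (1, 3, P, 1).
    out = F.grid_sample(
        img.permute(2, 0, 1)[None],
        grid[None, :, None, :],
        align_corners=True,
        padding_mode="border",
    )
    return out[0, :, :, 0].T  # (P, 3)

colors = sketch_sample_colors(torch.rand(48, 64, 3), torch.tensor([[10.5, 20.25]]))
# --- end sketch; the original color lookup follows ---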
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
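- # (an empty TensorDataset stands in here so val_dataloader can still
- # return a loader when the sequence provides no validation frames)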
- self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/data/utils.py b/som_out/bear/code/2024-10-26-005721/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. - - Args: - occlusions: [-inf, inf], np.float32 - expected_dist:, [-inf, inf], np.float32 - - Returns: - visibles: bool - """ - - def sigmoid(x): - if x.dtype == np.ndarray: - return 1 / (1 + np.exp(-x)) - else: - return torch.sigmoid(x) - - visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5 - return visibles - - -def parse_tapir_track_info(occlusions, expected_dist): - """ - return: - valid_visible: mask of visible & confident points - valid_invisible: mask of invisible & confident points - confidence: clamped confidence scores (all < 0.5 -> 0) - """ - visiblility = 1 - F.sigmoid(occlusions) - confidence = 1 - F.sigmoid(expected_dist) - valid_visible = visiblility * confidence > 0.5 - valid_invisible = (1 - visiblility) * confidence > 0.5 - # set all confidence < 0.5 to 0 - confidence = confidence * (valid_visible | valid_invisible).float() - return valid_visible, valid_invisible, confidence - - -def get_tracks_3d_for_query_frame( - query_index: int, - query_img: torch.Tensor, - tracks_2d: torch.Tensor, - depths: torch.Tensor, - masks: torch.Tensor, - inv_Ks: torch.Tensor, - c2ws: torch.Tensor, -): - """ - :param query_index (int) - :param query_img [H, W, 3] - :param tracks_2d [N, T, 4] - :param depths [T, H, W] - :param masks [T, H, W] - :param inv_Ks [T, 3, 3] - :param c2ws [T, 4, 4] - returns ( - tracks_3d [N, T, 3] - track_colors [N, 3] - visibles [N, T] - invisibles [N, T] - confidences [N, T] - ) - """ - T, H, W = depths.shape - query_img = query_img[None].permute(0, 3, 1, 2) # (1, 3, H, W) - tracks_2d = tracks_2d.swapaxes(0, 1) # (T, N, 4) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - # (T, N), (T, N), (T, N) - visibles, 
invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - track_depths = F.grid_sample( - depths[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths[..., None] - ) - tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[ - ..., :3 - ] - # Filter out out-of-mask tracks. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - is_in_masks = ( - F.grid_sample( - masks[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - )[:, 0, 0] - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - - # valid if in the fg mask at least 40% of the time - # in_mask_counts = is_in_masks.sum(0) - # t = 0.25 - # thresh = min(t * T, in_mask_counts.float().quantile(t).item()) - # valid = in_mask_counts > thresh - valid = is_in_masks[query_index] - # valid if visible 5% of the time - visible_counts = visibles.sum(0) - valid = valid & ( - visible_counts - >= min( - int(0.05 * T), - visible_counts.float().quantile(0.1).item(), - ) - ) - - # Get track's color from the query frame. - # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. 
Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. - If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. 
-def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] - ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/init_utils.py b/som_out/bear/code/2024-10-26-005721/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from 
flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes -from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - -def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor -) -> GaussianParams: - """ - using dataclasses individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. - opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - -def init_bg( - points: StaticObservations, -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkdg_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkdg_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals. 
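- # (the quaternion rotates the canonical +z axis onto each point's normal:
- # axis = normalize(z x n), angle = arccos(z . n); roma returns xyzw
- # quaternions, so roll(1) reorders them to wxyz)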
- local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t - 
procrustes_weights = ( - weights[cano_t] - * weights[cur_t] - * (confidences[cano_t] + confidences[cur_t]) - / 2 - ) - if procrustes_weights.sum() < min_mean_weight * num_frames: - init_rots[n, cur_t] = init_rots[n, prev_t] - init_ts[n, cur_t] = init_ts[n, prev_t] - cluster_skip_ts.append(cur_t) - else: - se3, (err, err_before) = solve_procrustes( - cluster[cano_t], - cluster[cur_t], - weights=procrustes_weights, - enforce_se3=True, - rot_type=rot_type, - ) - init_rot, init_t, _ = se3 - assert init_rot.shape[-1] == rot_dim - # double cover - if rot_type == "quat" and torch.linalg.norm( - init_rot - init_rots[n][prev_t] - ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]): - init_rot = -init_rot - init_rots[n, cur_t] = init_rot - init_ts[n, cur_t] = init_t - if err == np.nan: - print(f"{cur_t=} {err=}") - print(f"{procrustes_weights.isnan().sum()=}") - if err_before == np.nan: - print(f"{cur_t=} {err_before=}") - print(f"{procrustes_weights.isnan().sum()=}") - errs_after[n, cur_t] = err - errs_before[n, cur_t] = err_before - prev_t = cur_t - skipped_ts[cluster_id.item()] = cluster_skip_ts - - guru.info(f"{skipped_ts=}") - guru.info( - "procrustes init median error: {:.5f} => {:.5f}".format( - np.median(errs_before[errs_before > 0]), - np.median(errs_after[errs_after > 0]), - ) - ) - guru.info( - "procrustes init mean error: {:.5f} => {:.5f}".format( - np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0]) - ) - ) - guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}") - - if vis: - server = get_server(port) - center_idcs = torch.argmin(dists2centers, dim=0) - print(f"{dists2centers.shape=} {center_idcs.shape=}") - vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs]) - vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks") - import ipdb - - ipdb.set_trace() - - bases = MotionBases(init_rots, init_ts) - return bases, motion_coefs, tracks_3d - - -def run_initial_optim( - fg: GaussianParams, - bases: MotionBases, - tracks_3d: TrackObservations, - Ks: torch.Tensor, - w2cs: torch.Tensor, - num_iters: int = 1000, - use_depth_range_loss: bool = False, -): - """ - :param motion_rots: [num_bases, num_frames, 4|6] - :param motion_transls: [num_bases, num_frames, 3] - :param motion_coefs: [num_bases, num_frames] - :param means: [num_gaussians, 3] - """ - optimizer = torch.optim.Adam( - [ - {"params": bases.params["rots"], "lr": 1e-2}, - {"params": bases.params["transls"], "lr": 3e-2}, - {"params": fg.params["motion_coefs"], "lr": 1e-2}, - {"params": fg.params["means"], "lr": 1e-3}, - ], - ) - scheduler = torch.optim.lr_scheduler.ExponentialLR( - optimizer, gamma=0.1 ** (1 / num_iters) - ) - G = fg.params.means.shape[0] - num_frames = bases.num_frames - device = bases.params["rots"].device - - w_smooth_func = lambda i, min_v, max_v, th: ( - min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v - ) - - gt_2d, gt_depth = project_2d_tracks( - tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - # (G, T, 2) - gt_2d = gt_2d.swapaxes(0, 1) - # (G, T) - gt_depth = gt_depth.swapaxes(0, 1) - - ts = torch.arange(0, num_frames, device=device) - ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1)) # i (3B,) - - pbar = tqdm(range(0, num_iters)) - for i in pbar: - coefs = fg.get_coefs() - transfms = bases.compute_transforms(ts, coefs) - positions = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 
1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. - w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = 
init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / 
(cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/loss_utils.py b/som_out/bear/code/2024-10-26-005721/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return 
torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - 
# acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/metrics.py b/som_out/bear/code/2024-10-26-005721/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/metrics.py +++ /dev/null @@ -1,313 +0,0 @@ -from typing import Literal - -import numpy as np -import torch -import torch.nn.functional as F -from torchmetrics.functional.image.lpips import _NoTrainLpips -from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure -from torchmetrics.metric import Metric -from torchmetrics.utilities import dim_zero_cat -from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE - - -def compute_psnr( - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, -) -> float: - """ - Args: - preds (torch.Tensor): (..., 3) predicted images in [0, 1]. - targets (torch.Tensor): (..., 3) target images in [0, 1]. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - - Returns: - psnr (float): Peak signal-to-noise ratio. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - return ( - -10.0 - * torch.log( - F.mse_loss( - preds * masks[..., None], - targets * masks[..., None], - reduction="sum", - ) - / masks.sum().clamp(min=1.0) - / 3.0 - ) - / np.log(10.0) - ).item() - - -def compute_pose_errors( - preds: torch.Tensor, targets: torch.Tensor -) -> tuple[float, float, float]: - """ - Args: - preds: (N, 4, 4) predicted camera poses. - targets: (N, 4, 4) target camera poses. - - Returns: - ate (float): Absolute trajectory error. - rpe_t (float): Relative pose error in translation. - rpe_r (float): Relative pose error in rotation (degree). - """ - # Compute ATE. - ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item() - # Compute RPE_t and RPE_r. - # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r. - # torch has numerical issues for acos when the value is close to 1.0, i.e. - # RPE_r is supposed to be very small, and will result in artificially large - # error. 
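- # (RPE compares consecutive relative motions rel_t = T_t^-1 @ T_{t+1}; the
- # rotation angle is recovered from the trace identity
- # cos(theta) = (trace(R) - 1) / 2 and reported in degrees)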
- preds = preds.detach().cpu().numpy() - targets = targets.detach().cpu().numpy() - pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:] - pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:] - target_rels = np.linalg.inv(targets[:-1]) @ targets[1:] - error_rels = np.linalg.inv(target_rels) @ pred_rels - traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1) - rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item() - rpe_r = ( - np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item() - / np.pi - * 180.0 - ) - return ate, rpe_t, rpe_r - - -class mPSNR(PeakSignalNoiseRatio): - sum_squared_error: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__(self, **kwargs) -> None: - super().__init__( - data_range=1.0, - base=10.0, - dim=None, - reduction="elementwise_mean", - **kwargs, - ) - self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (..., 3) float32 predicted images. - targets (torch.Tensor): (..., 3) float32 target images. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - self.sum_squared_error.append( - torch.sum(torch.pow((preds - targets) * masks[..., None], 2)) - ) - self.total.append(masks.sum().to(torch.int64) * 3) - - def compute(self) -> torch.Tensor: - """Compute peak signal-to-noise ratio over state.""" - sum_squared_error = dim_zero_cat(self.sum_squared_error) - total = dim_zero_cat(self.total) - return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0) - - -class mSSIM(StructuralSimilarityIndexMeasure): - similarity: list - - def __init__(self, **kwargs) -> None: - super().__init__( - reduction=None, - data_range=1.0, - return_full_image=False, - **kwargs, - ) - assert isinstance(self.sigma, float) - - def __len__(self) -> int: - return sum([s.shape[0] for s in self.similarity]) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional binary masks where - the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - - # Construct a 1D Gaussian blur filter. - assert isinstance(self.kernel_size, int) - hw = self.kernel_size // 2 - shift = (2 * hw - self.kernel_size + 1) / 2 - assert isinstance(self.sigma, float) - f_i = ( - (torch.arange(self.kernel_size, device=preds.device) - hw + shift) - / self.sigma - ) ** 2 - filt = torch.exp(-0.5 * f_i) - filt /= torch.sum(filt) - - # Blur in x and y (faster than the 2D convolution). - def convolve2d(z, m, f): - # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf). 
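- # (blur the mask-weighted signal and the mask with the same kernel, then
- # rescale by the per-window valid fraction so missing pixels do not bias
- # the local statistics; the second output marks windows with any support)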
- z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. - # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. 
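- 
- Note: LPIPS is evaluated with spatial maps (spatial=True) and the score
- is averaged over the 1-regions of the masks only.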
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. - """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/params.py b/som_out/bear/code/2024-10-26-005721/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if 
f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] 
== 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/renderer.py b/som_out/bear/code/2024-10-26-005721/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. - work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/scene_model.py b/som_out/bear/code/2024-10-26-005721/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def 
__init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = 
GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). - target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = 
colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. - self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/tensor_dataclass.py b/som_out/bear/code/2024-10-26-005721/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
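-            # Rebuild the plain dict by mapping each value; keys pass through unchanged.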
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/trainer.py b/som_out/bear/code/2024-10-26-005721/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
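-        # work_dir receives all artifacts (tensorboard logs, checkpoints, videos);
-        # the *_every arguments control how often each is produced.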
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
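-            # Pinhole intrinsics: fx = fy derived from the viewer's vertical FOV,
-            # principal point at the image center.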
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
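-        # compute_poses_all returns (G, B * N, 3); the transpose below yields the
-        # (B * N, G, 3) layout annotated above.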
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
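-        # Mark the pixels that carry a query track; only those pixels contribute
-        # to the 2D track loss below.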
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
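-        # Accumulates per-gaussian screen-space gradient norms, visibility counts,
-        # and maximum radii over the views rendered in the last step.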
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left"
-        )
-
-    @torch.no_grad()
-    def _cull_control_step(self, global_step):
-        # Cull gaussians.
-        cfg = self.optim_cfg
-        opacities = self.model.get_opacities_all()
-        device = opacities.device
-        is_opacity_too_small = opacities < cfg.cull_opacity_threshold
-        is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool)
-        is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool)
-        cull_scale_threshold = (
-            torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold
-        )
-        num_fg = self.model.num_fg_gaussians
-        cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale
-        if global_step > self.reset_opacity_every:
-            scales = self.model.get_scales_all()
-            is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold
-        if global_step < cfg.stop_control_by_screen_steps:
-            is_radius_too_big = (
-                self.running_stats["max_radii"] > cfg.cull_screen_threshold
-            )
-        should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big
-        should_fg_cull = should_cull[:num_fg]
-        should_bg_cull = should_cull[num_fg:]
-
-        fg_param_map = self.model.fg.cull_params(should_fg_cull)
-        for param_name, new_params in fg_param_map.items():
-            full_param_name = f"fg.params.{param_name}"
-            optimizer = self.optimizers[full_param_name]
-            remove_from_optim(optimizer, [new_params], should_fg_cull)
-
-        if self.model.bg is not None:
-            bg_param_map = self.model.bg.cull_params(should_bg_cull)
-            for param_name, new_params in bg_param_map.items():
-                full_param_name = f"bg.params.{param_name}"
-                optimizer = self.optimizers[full_param_name]
-                remove_from_optim(optimizer, [new_params], should_bg_cull)
-
-        # Update running stats.
-        for k, v in self.running_stats.items():
-            self.running_stats[k] = v[~should_cull]
-
-        guru.info(
-            f"Culled {should_cull.sum().item()} gaussians, "
-            f"{self.model.num_gaussians} gaussians left"
-        )
-
-    @torch.no_grad()
-    def _reset_opacity_control_step(self):
-        # Reset gaussian opacities.
-        new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold))
-        # Skip the bg part when the model has no background gaussians.
-        parts = ["fg", "bg"] if self.model.bg is not None else ["fg"]
-        for part in parts:
-            part_params = getattr(self.model, part).reset_opacities(new_val)
-            # Modify optimizer states by new assignment.
-            for param_name, new_params in part_params.items():
-                full_param_name = f"{part}.params.{param_name}"
-                optimizer = self.optimizers[full_param_name]
-                reset_in_optim(optimizer, [new_params])
-        guru.info("Reset opacities")
-
-    def configure_optimizers(self):
-        def _exponential_decay(step, *, lr_init, lr_final):
-            t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0)
-            lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)
-            return lr / lr_init
-
-        lr_dict = asdict(self.lr_cfg)
-        optimizers = {}
-        schedulers = {}
-        # named parameters will be [part].params.[field]
-        # e.g.
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/trajectories.py b/som_out/bear/code/2024-10-26-005721/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1)
-    y = torch.cross(z, x, dim=-1)  # (3)
-    avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center)
-    avg_w2c = torch.linalg.inv(avg_c2w)
-    return avg_w2c
-
-
-def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor:
-    """Triangulate a set of rays to find a single lookat point.
-
-    Args:
-        origins (torch.Tensor): A (N, 3) array of ray origins.
-        viewdirs (torch.Tensor): A (N, 3) array of ray view directions.
-
-    Returns:
-        torch.Tensor: A (3,) lookat point.
-    """
-
-    viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1)
-    eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None]
-    # Calculate projection matrix I - rr^T
-    I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :])
-    # Compute sum of projections
-    sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3)
-    # Solve for the intersection point using least squares
-    lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0]
-    # Check NaNs.
-    assert not torch.any(torch.isnan(lookat))
-    return lookat
-
-
-def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor):
-    """
-    Args:
-        positions: (N, 3) tensor of camera positions
-        lookat: (3,) tensor of lookat point
-        up: (3,) tensor of up vector
-
-    Returns:
-        w2cs: (N, 4, 4) tensor of world-to-camera transforms
-    """
-    forward_vectors = F.normalize(lookat - positions, dim=-1)
-    right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1)
-    down_vectors = F.normalize(
-        torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1
-    )
-    Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1)
-    w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions))
-    return w2cs
-
-
-def get_arc_w2cs(
-    ref_w2c: torch.Tensor,
-    lookat: torch.Tensor,
-    up: torch.Tensor,
-    num_frames: int,
-    degree: float,
-    **_,
-) -> torch.Tensor:
-    ref_position = torch.linalg.inv(ref_w2c)[:3, 3]
-    thetas = (
-        torch.sin(
-            torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[
-                :-1
-            ]
-        )
-        * (degree / 2.0)
-        / 180.0
-        * torch.pi
-    )
-    positions = torch.einsum(
-        "nij,j->ni",
-        roma.rotvec_to_rotmat(thetas[:, None] * up[None]),
-        ref_position - lookat,
-    )
-    return get_lookat_w2cs(positions, lookat, up)
-
-
-def get_lemniscate_w2cs(
-    ref_w2c: torch.Tensor,
-    lookat: torch.Tensor,
-    up: torch.Tensor,
-    num_frames: int,
-    degree: float,
-    **_,
-) -> torch.Tensor:
-    ref_c2w = torch.linalg.inv(ref_w2c)
-    a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi)
-    # Lemniscate curve in camera space. Starting at the origin.
-    thetas = (
-        torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1]
-        + torch.pi / 2
-    )
-    positions = torch.stack(
-        [
-            a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2),
-            a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2),
-            torch.zeros(num_frames, device=ref_w2c.device),
-        ],
-        dim=-1,
-    )
-    # Transform to world space.
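-    # ref_c2w[:3] is a (3, 4) matrix; padding each position with a trailing 1
-    # applies the full rigid transform in a single einsum.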
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/transforms.py b/som_out/bear/code/2024-10-26-005721/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transfm to be SE3. - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - q (torch.Tensor): (4,), rotation component in quaternion of WXYZ - format. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (torch.Tensor): (), average L2 distance after alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error. 
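-    # Apply the recovered similarity transform to src and measure the weighted
-    # residual against dst.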
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/validator.py b/som_out/bear/code/2024-10-26-005721/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
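-            # Fall back to an all-ones valid mask when the dataset provides none.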
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
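-            # PCK counts a keypoint as correct if it lands within 5% of the
-            # longer image side.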
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
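-            # Ground-truth frame and rendering are placed side by side
-            # (concatenated along the width axis).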
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
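The 2D-track loop above projects rendered 3D tracks with the target intrinsics and divides by a clamped depth, which keeps points at (or numerically behind) the camera plane from producing infinities. The same operation in isolation, unbatched, assuming camera-space points with z pointing forward:

```python
import torch

def project(K: torch.Tensor, pts_cam: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    uvw = torch.einsum("ij,pj->pi", K, pts_cam)           # (P, 3)
    return uvw[:, :2] / torch.clamp(uvw[:, 2:], min=eps)  # clamp avoids divide-by-zero

K = torch.tensor([[500.0, 0.0, 427.0],
                  [0.0, 500.0, 240.0],
                  [0.0, 0.0, 1.0]])
pts = torch.tensor([[0.1, -0.2, 2.0],   # projects to (452, 190)
                    [0.0, 0.0, 1e-9]])  # degenerate depth; stays finite thanks to the clamp
print(project(K, pts))
```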
- t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (3, 3). - img = batch["imgs"][0] - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "motion_coefs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/vis/__init__.py b/som_out/bear/code/2024-10-26-005721/flow3d/vis/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/vis/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc deleted file mode 100644 index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/vis/__pycache__/render_panel.cpython-310.pyc deleted file mode 100644 index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/vis/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/bear/code/2024-10-26-005721/flow3d/vis/__pycache__/viewer.cpython-310.pyc deleted file mode 100644 index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005721/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/vis/playback_panel.py b/som_out/bear/code/2024-10-26-005721/flow3d/vis/playback_panel.py deleted file mode 100644 index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/vis/playback_panel.py +++ /dev/null @@ -1,68 +0,0 @@ -import threading -import time - -import viser - - -def add_gui_playback_group( - server: viser.ViserServer, - num_frames: int, - min_fps: float = 1.0, - max_fps: float = 60.0, - fps_step: float = 0.1, - initial_fps: float = 10.0, -): - 
gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/vis/render_panel.py b/som_out/bear/code/2024-10-26-005721/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
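The playback panel's animation is just a daemon thread that advances the timestep slider with modulo wrap-around while the pause button is visible (i.e. while playing). The same pattern stripped of the viser handles; the names here are illustrative only:

```python
import threading
import time

state = {"frame": 0, "playing": True, "fps": 10.0}
num_frames = 30

def _advance() -> None:
    while True:
        if state["playing"]:
            state["frame"] = (state["frame"] + 1) % num_frames  # wrap around at the end
        time.sleep(1.0 / state["fps"])

# daemon=True: the loop never exits, but it won't block interpreter shutdown.
threading.Thread(target=_advance, daemon=True).start()
time.sleep(0.35)
print(state["frame"])  # 3 or 4 at 10 FPS (timing-dependent)
```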
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
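CameraPath drives everything from Kochanek-Bartels splines (the `splines` package), with the GUI tension slider mapped to the T in TCB and `endconditions` switching between "closed" (loop) and "natural". A minimal position-spline example, assuming `splines` is installed:

```python
import numpy as np
import splines

positions = [
    np.array([0.0, 0.0, 0.0]),
    np.array([1.0, 0.0, 0.0]),
    np.array([1.0, 1.0, 0.0]),
]
spline = splines.KochanekBartels(positions, tcb=(0.5, 0.0, 0.0), endconditions="natural")
# The parameter is in keyframe units: 0.5 is halfway between keyframes 0 and 1.
print(spline.evaluate(0.5))
```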
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
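The "Go to" handler above eases the viewer camera toward a keyframe by interpolating in the SE(3) Lie algebra: take the log of the relative transform, scale it, and exp back. The same idea standalone, using viser's transform helpers as in the panel code:

```python
import numpy as np
import viser.transforms as tf

T_world_current = tf.SE3.from_rotation_and_translation(
    tf.SO3.identity(), np.array([0.0, 0.0, 0.0])
)
T_world_target = tf.SE3.from_rotation_and_translation(
    tf.SO3.from_z_radians(np.pi / 2), np.array([1.0, 0.0, 0.0])
)
T_current_target = T_world_current.inverse() @ T_world_target
for j in range(10):
    # j == 0 stays at the current pose; j == 9 lands exactly on the target.
    T_world_set = T_world_current @ tf.SE3.exp(T_current_target.log() * j / 9.0)
print(np.allclose(T_world_set.as_matrix(), T_world_target.as_matrix()))  # True
```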
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
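`spline_t_from_t_sec` maps wall-clock seconds to the spline's keyframe-index parameter through a PCHIP interpolant precisely because PCHIP is monotone: an ordinary cubic fit through uneven transition times could overshoot and briefly run the camera backwards. The mapping in isolation:

```python
import numpy as np
import scipy.interpolate

transition_times_cumsum = np.array([0.0, 2.0, 2.5, 6.0])  # uneven per-transition durations
spline_indices = np.arange(len(transition_times_cumsum))  # 0, 1, 2, 3
interp = scipy.interpolate.PchipInterpolator(transition_times_cumsum, spline_indices)
print(interp(2.25))  # between keyframes 1 and 2
print(np.clip(interp(6.0), 0, spline_indices[-1]))  # 3.0; clip guards float rounding at the ends
```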
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
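Orientations get the same spline treatment via `splines.quaternion`; note the `np.roll` converting viser's wxyz convention into the xyzw order that `UnitQuaternion.from_unit_xyzw` expects. A two-keyframe sketch, again assuming the `splines` package:

```python
import numpy as np
import splines.quaternion

wxyzs = [
    np.array([1.0, 0.0, 0.0, 0.0]),                              # identity
    np.array([np.cos(np.pi / 4), np.sin(np.pi / 4), 0.0, 0.0]),  # 90 deg about x
]
rotations = [
    splines.quaternion.UnitQuaternion.from_unit_xyzw(np.roll(q, shift=-1)) for q in wxyzs
]
spline = splines.quaternion.KochanekBartels(
    rotations, tcb=(0.5, 0.0, 0.0), endconditions="natural"
)
quat = spline.evaluate(0.5)
print(quat.scalar, quat.vector)  # ~45 deg about x: scalar ~0.924, vector ~(0.383, 0, 0)
```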
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
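`compute_duration` and `compute_transition_times_cumsum` sum one transition per keyframe (skipping the first unless looping), taking the per-keyframe override when it is enabled and the default otherwise. The arithmetic in miniature, with illustrative values:

```python
import numpy as np

default_transition_sec = 2.0
overrides = [None, 0.5, None, None]  # per-keyframe override_transition_sec; index 0 unused
durations = [d if d is not None else default_transition_sec for d in overrides[1:]]
cumsum = np.concatenate([[0.0], np.cumsum(durations)])
print(cumsum)      # [0.  0.5 2.5 4.5]
print(cumsum[-1])  # total duration: 4.5 s for four keyframes, no loop
```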
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
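`_make_transform_controls_callback` exists for a classic Python reason: registering the callback directly inside the loop below would close over the loop variables by reference, so every move handle would end up editing the last keyframe. Factoring the registration into a helper (or freezing values with default arguments) binds each handle to its own keyframe:

```python
callbacks_bad = [lambda: i for i in range(3)]       # all close over the same `i`
callbacks_good = [lambda i=i: i for i in range(3)]  # default argument freezes each value
print([f() for f in callbacks_bad])   # [2, 2, 2]
print([f() for f in callbacks_good])  # [0, 1, 2]
```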
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
- Returns current camera pose + FOV if available.""" - - if preview_frame_slider is None: - return - maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad( - preview_frame_slider.value / get_max_frame_index() - ) - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - render_tab_state.preview_fov = fov_rad - render_tab_state.preview_aspect = camera_path.get_aspect() - render_tab_state.preview_camera_type = camera_type.value - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - return pose, fov_rad, time - - def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]: - """Helper for creating the current frame # slider. This is removed and - re-added anytime the `max` value changes.""" - - with playback_folder: - preview_frame_slider = server.gui.add_slider( - "Preview frame", - min=0, - max=get_max_frame_index(), - step=1, - initial_value=0, - # Place right after the pause button. - order=preview_render_stop_button.order + 0.01, - disabled=get_max_frame_index() == 1, - ) - play_button.disabled = preview_frame_slider.disabled - preview_render_button.disabled = preview_frame_slider.disabled - - @preview_frame_slider.on_update - def _(_) -> None: - nonlocal preview_camera_handle - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - - preview_camera_handle = server.scene.add_camera_frustum( - "/preview_camera", - fov=fov_rad, - aspect=resolution.value[0] / resolution.value[1], - scale=0.35, - wxyz=pose.rotation().wxyz, - position=pose.translation(), - color=(10, 200, 30), - ) - if render_tab_state.preview_render: - for client in server.get_clients().values(): - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - return preview_frame_slider - - # We back up the camera poses before and after we start previewing renders. - camera_pose_backup_from_id: Dict[int, tuple] = {} - - @preview_render_button.on_click - def _(_) -> None: - render_tab_state.preview_render = True - preview_render_button.visible = False - preview_render_stop_button.visible = True - - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov, time = maybe_pose_and_fov_rad_and_time - del fov - - # Hide all scene nodes when we're previewing the render. - server.scene.set_global_visibility(True) - - # Back up and then set camera poses. - for client in server.get_clients().values(): - camera_pose_backup_from_id[client.client_id] = ( - client.camera.position, - client.camera.look_at, - client.camera.up_direction, - ) - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - @preview_render_stop_button.on_click - def _(_) -> None: - render_tab_state.preview_render = False - preview_render_button.visible = True - preview_render_stop_button.visible = False - - # Revert camera poses. 
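Entering render preview overwrites every connected client's camera, so the handler first snapshots each camera into `camera_pose_backup_from_id`, keyed by client id; the stop handler pops entries as it restores them, which doubles as "restore at most once". The bookkeeping pattern on hypothetical stand-ins for viser client handles:

```python
class FakeCamera:
    def __init__(self):
        self.position, self.look_at, self.up_direction = (0, 0, 0), (0, 0, 1), (0, 1, 0)

clients = {7: FakeCamera(), 9: FakeCamera()}
backup = {}
for cid, cam in clients.items():  # enter preview: snapshot, then overwrite
    backup[cid] = (cam.position, cam.look_at, cam.up_direction)
    cam.position = (5, 5, 5)
for cid, cam in clients.items():  # exit preview: restore only clients we saved
    if cid in backup:
        cam.position, cam.look_at, cam.up_direction = backup.pop(cid)
print(clients[7].position, backup)  # (0, 0, 0) {}
```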
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera() # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play).start() - - # Play the camera trajectory when the play button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # add button for loading existing path - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - camera_path_dir = datapath.parent - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # load the json file - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # apply the x rotation by 180 deg - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_Fov values will not be exactly matched. 
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/vis/utils.py b/som_out/bear/code/2024-10-26-005721/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, occ, radius=radius) 
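Exported camera paths store, per frame, the flattened world-to-camera matrix and an intrinsics matrix rebuilt from the interpolated FOV; `get_intrinsics` appears to treat the FOV as vertical, since the focal length is derived from H. A worked check for a 1920x1080 render at 75 degrees (values approximate):

```python
import numpy as np

def get_intrinsics(W, H, fov):
    focal = 0.5 * H / np.tan(0.5 * fov)  # fov in radians, measured vertically
    return np.array([[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]])

W, H = 1920, 1080
K = get_intrinsics(W, H, np.deg2rad(75.0))
print(np.round(K, 1))  # focal ~703.7 px, principal point at (960, 540)
```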
- out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/bear/code/2024-10-26-005721/flow3d/vis/viewer.py b/som_out/bear/code/2024-10-26-005721/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/bear/code/2024-10-26-005721/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/bear/code/2024-10-26-005721/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/bear/code/2024-10-26-005721/scripts/evaluate_iphone.py b/som_out/bear/code/2024-10-26-005721/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005721/scripts/evaluate_iphone.py +++ /dev/null @@ 
-1,447 +0,0 @@
-import argparse
-import json
-import os.path as osp
-from glob import glob
-from itertools import product
-
-import cv2
-import imageio.v3 as iio
-import numpy as np
-import roma
-import torch
-from tqdm import tqdm
-
-from flow3d.data.colmap import get_colmap_camera_params
-from flow3d.metrics import mLPIPS, mPSNR, mSSIM
-from flow3d.transforms import rt_to_mat4, solve_procrustes
-
-parser = argparse.ArgumentParser()
-parser.add_argument(
-    "--data_dir",
-    type=str,
-    help="Path to the data directory that contains all the sequences.",
-)
-parser.add_argument(
-    "--result_dir",
-    type=str,
-    help="Path to the result directory that contains the results. "
-    "For batch evaluation, result_dir should contain one subdirectory per sequence (result_dir/seq_name/results). "
-    "For single-sequence evaluation, result_dir should contain the results directly (result_dir/results).",
-)
-parser.add_argument(
-    "--seq_names",
-    type=str,
-    nargs="+",
-    default=[
-        "apple",
-        "backpack",
-        "block",
-        "creeper",
-        "handwavy",
-        "haru-sit",
-        "mochi-high-five",
-        "paper-windmill",
-        "pillow",
-        "spin",
-        "sriracha-tree",
-        "teddy",
-    ],
-    help="Sequence names to evaluate.",
-)
-args = parser.parse_args()
-
-
-def load_data_dict(data_dir, train_names, val_names):
-    val_imgs = np.array(
-        [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names]
-    )
-    val_covisibles = np.array(
-        [
-            iio.imread(
-                osp.join(
-                    data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png"
-                )
-            )
-            for name in tqdm(val_names, desc="Loading val covisibles")
-        ]
-    )
-    train_depths = np.array(
-        [
-            np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0]
-            for name in train_names
-        ]
-    )
-    train_Ks, train_w2cs = get_colmap_camera_params(
-        osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"),
-        [name + ".png" for name in train_names],
-    )
-    train_Ks = train_Ks[:, :3, :3]
-    scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item()
-    train_c2ws = np.linalg.inv(train_w2cs)
-    train_c2ws[:, :3, -1] *= scale
-    train_w2cs = np.linalg.inv(train_c2ws)
-    keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json")))
-    keypoints_2d = []
-    for keypoint_path in keypoint_paths:
-        with open(keypoint_path) as f:
-            keypoints_2d.append(json.load(f))
-    keypoints_2d = np.array(keypoints_2d)
-    keypoints_2d[..., :2] *= 2.0
-    time_ids = np.array(
-        [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths]
-    )
-    time_pairs = np.array(list(product(time_ids, repeat=2)))
-    index_pairs = np.array(list(product(range(len(time_ids)), repeat=2)))
-    keypoints_3d = []
-    for i, kps_2d in zip(time_ids, keypoints_2d):
-        K = train_Ks[i]
-        w2c = train_w2cs[i]
-        depth = train_depths[i]
-        is_kp_visible = kps_2d[:, 2] == 1
-        is_depth_valid = (
-            cv2.remap(
-                (depth != 0).astype(np.float32),
-                kps_2d[None, :, :2].astype(np.float32),
-                None,  # type: ignore
-                cv2.INTER_LINEAR,
-                borderMode=cv2.BORDER_CONSTANT,
-            )[0]
-            == 1
-        )
-        kp_depths = cv2.remap(
-            depth,  # type: ignore
-            kps_2d[None, :, :2].astype(np.float32),
-            None,  # type: ignore
-            cv2.INTER_LINEAR,
-            borderMode=cv2.BORDER_CONSTANT,
-        )[0]
-        kps_3d = (
-            np.einsum(
-                "ij,pj->pi",
-                np.linalg.inv(K),
-                np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1),
-            )
-            * kp_depths[:, None]
-        )
-        kps_3d = np.einsum(
-            "ij,pj->pi",
-            np.linalg.inv(w2c)[:3],
-            np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1),
-        )
-        kps_3d = np.concatenate(
-            [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1
-        )
-        kps_3d[kps_3d[:, -1] != 1] = 0.0
-        keypoints_3d.append(kps_3d)
-    keypoints_3d = np.array(keypoints_3d)
-    return {
-        "val_imgs": val_imgs,
-        "val_covisibles": val_covisibles,
-        "train_depths": train_depths,
-        "train_Ks": train_Ks,
-        "train_w2cs": train_w2cs,
-        "keypoints_2d": keypoints_2d,
-        "keypoints_3d": keypoints_3d,
-        "time_ids": time_ids,
-        "time_pairs": time_pairs,
-        "index_pairs": index_pairs,
-    }
-
-
-def load_result_dict(result_dir, val_names):
-    try:
-        pred_val_imgs = np.array(
-            [
-                iio.imread(osp.join(result_dir, "rgb", f"{name}.png"))
-                for name in val_names
-            ]
-        )
-    except Exception:
-        pred_val_imgs = None
-    try:
-        keypoints_dict = np.load(
-            osp.join(result_dir, "keypoints.npz"), allow_pickle=True
-        )
-        if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict:
-            keypoints_dict = keypoints_dict["arr_0"].item()
-        pred_keypoint_Ks = keypoints_dict["Ks"]
-        pred_keypoint_w2cs = keypoints_dict["w2cs"]
-        pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"]
-        pred_train_depths = keypoints_dict["pred_train_depths"]
-    except Exception:
-        print(
-            "No keypoints.npz found; make sure the method itself genuinely cannot produce keypoints."
-        )
-        keypoints_dict = {}
-        pred_keypoint_Ks = None
-        pred_keypoint_w2cs = None
-        pred_keypoints_3d = None
-        pred_train_depths = None
-
-    if "visibilities" in list(keypoints_dict.keys()):
-        pred_visibilities = keypoints_dict["visibilities"]
-    else:
-        pred_visibilities = None
-
-    return {
-        "pred_val_imgs": pred_val_imgs,
-        "pred_train_depths": pred_train_depths,
-        "pred_keypoint_Ks": pred_keypoint_Ks,
-        "pred_keypoint_w2cs": pred_keypoint_w2cs,
-        "pred_keypoints_3d": pred_keypoints_3d,
-        "pred_visibilities": pred_visibilities,
-    }
-
-
-def evaluate_3d_tracking(data_dict, result_dict):
-    train_Ks = data_dict["train_Ks"]
-    train_w2cs = data_dict["train_w2cs"]
-    keypoints_3d = data_dict["keypoints_3d"]
-    time_ids = data_dict["time_ids"]
-    time_pairs = data_dict["time_pairs"]
-    index_pairs = data_dict["index_pairs"]
-    pred_keypoint_Ks = result_dict["pred_keypoint_Ks"]
-    pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"]
-    pred_keypoints_3d = result_dict["pred_keypoints_3d"]
-    if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks):
-        print("Inconsistent camera intrinsics.")
-        print(train_Ks[time_ids][0], pred_keypoint_Ks[0])
-    keypoint_w2cs = train_w2cs[time_ids]
-    q, t, s = solve_procrustes(
-        torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to(
-            torch.float32
-        ),
-        torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32),
-    )[0]
-    R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1))
-    pred_keypoints_3d = np.einsum(
-        "ij,...j->...i",
-        rt_to_mat4(R, t, s).numpy().astype(np.float64),
-        np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1),
-    )
-    pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:]
-    # Compute 3D tracking metrics.
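An aside on the alignment step just performed (the metric computation resumes below): before any 3D metric is computed, the predicted camera trajectory is registered to the ground truth with a similarity transform (rotation, translation, scale) solved from the two sets of camera centers, and the same 4x4 transform is applied to every predicted keypoint. What follows is a minimal, self-contained sketch of that solve using the classic Umeyama closed form in plain NumPy; the function name umeyama_alignment is illustrative only, and the repo's own solve_procrustes is not reproduced here.

import numpy as np

def umeyama_alignment(src: np.ndarray, dst: np.ndarray) -> np.ndarray:
    """Solve argmin_{s,R,t} || s * R @ src_i + t - dst_i ||^2 for (N, 3) points.

    Returns a 4x4 similarity transform T with T[:3, :3] = s * R, T[:3, 3] = t,
    so homogeneous points can be mapped with the same pad/einsum pattern as above.
    """
    mu_src, mu_dst = src.mean(axis=0), dst.mean(axis=0)
    src_c, dst_c = src - mu_src, dst - mu_dst
    # Cross-covariance between target and source point clouds.
    cov = dst_c.T @ src_c / len(src)
    U, D, Vt = np.linalg.svd(cov)
    S = np.eye(3)
    if np.linalg.det(U) * np.linalg.det(Vt) < 0:
        S[-1, -1] = -1  # flip one axis to avoid a reflection
    R = U @ S @ Vt
    var_src = (src_c ** 2).sum() / len(src)
    s = np.trace(np.diag(D) @ S) / var_src
    t = mu_dst - s * R @ mu_src
    T = np.eye(4)
    T[:3, :3] = s * R
    T[:3, 3] = t
    return T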
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
None].repeat(num_pts, axis=-1) - evaluation_points = one_hot_eye == 0 - for i in range(num_frames): - evaluation_points[i, :, ~visibilities[i]] = False - occ_acc = np.sum( - np.equal(pred_visibilities, target_visibilities) & evaluation_points - ) / np.sum(evaluation_points) - all_frac_within = [] - all_jaccard = [] - - for thresh in [4, 8, 16, 32, 64]: - within_dist = np.sum( - np.square(pred_points - target_points), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, target_visibilities) - count_correct = np.sum(is_correct & evaluation_points) - count_visible_points = np.sum(target_visibilities & evaluation_points) - frac_correct = count_correct / count_visible_points - all_frac_within.append(frac_correct) - - true_positives = np.sum(is_correct & pred_visibilities & evaluation_points) - gt_positives = np.sum(target_visibilities & evaluation_points) - false_positives = (~target_visibilities) & pred_visibilities - false_positives = false_positives | ((~within_dist) & pred_visibilities) - false_positives = np.sum(false_positives & evaluation_points) - jaccard = true_positives / (gt_positives + false_positives) - all_jaccard.append(jaccard) - AJ = np.mean(all_jaccard) - APCK = np.mean(all_frac_within) - - print(f"2D tracking AJ: {AJ:.4f}") - print(f"2D tracking avg PCK: {APCK:.4f}") - print(f"2D tracking occlusion accuracy: {occ_acc:.4f}") - print("-----------------------------") - return AJ, APCK, occ_acc - - -def evaluate_nv(data_dict, result_dict): - device = "cuda" - psnr_metric = mPSNR().to(device) - ssim_metric = mSSIM().to(device) - lpips_metric = mLPIPS().to(device) - - val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device) - val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device) - pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device) - - for i in range(len(val_imgs)): - val_img = val_imgs[i] / 255.0 - pred_val_img = pred_val_imgs[i] / 255.0 - val_covisible = val_covisibles[i] / 255.0 - psnr_metric.update(val_img, pred_val_img, val_covisible) - ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - mpsnr = psnr_metric.compute().item() - mssim = ssim_metric.compute().item() - mlpips = lpips_metric.compute().item() - print(f"NV mPSNR: {mpsnr:.4f}") - print(f"NV mSSIM: {mssim:.4f}") - print(f"NV mLPIPS: {mlpips:.4f}") - return mpsnr, mssim, mlpips - - -if __name__ == "__main__": - seq_names = args.seq_names - - epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], [] - AJ_all, APCK_all, occ_acc_all = [], [], [] - mpsnr_all, mssim_all, mlpips_all = [], [], [] - - for seq_name in seq_names: - print("=========================================") - print(f"Evaluating {seq_name}") - print("=========================================") - data_dir = osp.join(args.data_dir, seq_name) - if not osp.exists(data_dir): - data_dir = args.data_dir - if not osp.exists(data_dir): - raise ValueError(f"Data directory {data_dir} not found.") - result_dir = osp.join(args.result_dir, seq_name, "results/") - if not osp.exists(result_dir): - result_dir = osp.join(args.result_dir, "results/") - if not osp.exists(result_dir): - raise ValueError(f"Result directory {result_dir} not found.") - - with open(osp.join(data_dir, "splits/train.json")) as f: - train_names = json.load(f)["frame_names"] - with open(osp.join(data_dir, "splits/val.json")) as f: - val_names = json.load(f)["frame_names"] - - data_dict = load_data_dict(data_dir, train_names, 
val_names) - result_dict = load_result_dict(result_dir, val_names) - if result_dict["pred_keypoints_3d"] is not None: - epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict) - AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict) - epe_all.append(epe) - pck_3d_10cm_all.append(pck_3d_10cm) - pck_3d_5cm_all.append(pck_3d_5cm) - AJ_all.append(AJ) - APCK_all.append(APCK) - occ_acc_all.append(occ_acc) - if len(data_dict["val_imgs"]) > 0: - if result_dict["pred_val_imgs"] is None: - print("No NV results found.") - continue - mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict) - mpsnr_all.append(mpsnr) - mssim_all.append(mssim) - mlpips_all.append(mlpips) - - print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}") - print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}") - print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}") - print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}") - print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}") - print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}") - print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}") - print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}") - print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}") diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/__init__.py b/som_out/bear/code/2024-10-26-005843/flow3d/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/configs.cpython-310.pyc deleted file mode 100644 index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/init_utils.cpython-310.pyc deleted file mode 100644 index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/loss_utils.cpython-310.pyc deleted file mode 100644 index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/metrics.cpython-310.pyc deleted file mode 100644 index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ diff --git 
a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/params.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/configs.py b/som_out/bear/code/2024-10-26-005843/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig - motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float = 
0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/data/__init__.py b/som_out/bear/code/2024-10-26-005843/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 index 0e12be124c07bc40149b0b8bcafd1413e94f8fd0..0000000000000000000000000000000000000000 Binary files 
a/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/colmap.cpython-310.pyc deleted file mode 100644 index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc deleted file mode 100644 index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/data/base_dataset.py b/som_out/bear/code/2024-10-26-005843/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... - - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ... 
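Before the collate helper that closes this class: a hypothetical toy subclass (not part of the repo) illustrating the contract BaseDataset imposes. Concrete datasets such as CasualDataset below mostly implement per-frame accessors plus the two bulk samplers, and downstream code relies only on the tensor shapes documented in the abstract methods' docstrings.

import torch
from flow3d.data.base_dataset import BaseDataset

class ToyDataset(BaseDataset):
    """All-zeros stand-in that satisfies the BaseDataset interface."""

    def __init__(self, num_frames: int = 8, h: int = 48, w: int = 64):
        self._n, self._h, self._w = num_frames, h, w

    @property
    def num_frames(self) -> int:
        return self._n

    def get_w2cs(self) -> torch.Tensor:  # (T, 4, 4) world-to-camera
        return torch.eye(4).expand(self._n, 4, 4)

    def get_Ks(self) -> torch.Tensor:  # (T, 3, 3) intrinsics
        return torch.eye(3).expand(self._n, 3, 3)

    def get_image(self, index: int) -> torch.Tensor:  # (H, W, 3)
        return torch.zeros(self._h, self._w, 3)

    def get_depth(self, index: int) -> torch.Tensor:  # (H, W)
        return torch.ones(self._h, self._w)

    def get_mask(self, index: int) -> torch.Tensor:  # (H, W)
        return torch.zeros(self._h, self._w)

    def get_img_wh(self) -> tuple[int, int]:
        return self._w, self._h

    def get_tracks_3d(self, num_samples: int, **kwargs):
        # coords (N, T, 3), visibles/invisibles/confidences (N, T), colors (N, 3)
        n, t = num_samples, self._n
        return (
            torch.zeros(n, t, 3),
            torch.ones(n, t, dtype=torch.bool),
            torch.zeros(n, t, dtype=torch.bool),
            torch.ones(n, t),
            torch.zeros(n, 3),
        )

    def get_bkgd_points(self, num_samples: int, **kwargs):
        # coordinates (N, 3), normals (N, 3), colors (N, 3)
        return (
            torch.zeros(num_samples, 3),
            torch.zeros(num_samples, 3),
            torch.zeros(num_samples, 3),
        )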
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/data/casual_dataset.py b/som_out/bear/code/2024-10-26-005843/flow3d/data/casual_dataset.py deleted file mode 100644 index e378c6d2408c8ae5d2cdaf45cf6f77dead5c7bd2..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything" #"aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything", #"aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = 
depth_type - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return self.depths[index] / 
self.scale - - def load_image(self, index) -> torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, dtype=torch.float32), - 
torch.arange(0, H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/data/colmap.py b/som_out/bear/code/2024-10-26-005843/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = {v.name: k for k, v in 
images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/data/iphone_dataset.py b/som_out/bear/code/2024-10-26-005843/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.scene_norm_dict = 
scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
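An aside before the depth-loading helper below: in the `original` camera branch above, the world-to-camera extrinsics are assembled blockwise from the stored orientation R and camera position p as w2c = [[R, -R p], [0, 1]], which is exactly what the `np.block` call builds. A minimal standalone sketch (function name is ours):

```python
import numpy as np

def w2c_from_orientation_position(orientation: np.ndarray, position: np.ndarray) -> np.ndarray:
    """Build a 4x4 world-to-camera matrix from a 3x3 orientation R and a camera position p."""
    w2c = np.eye(4, dtype=np.float32)
    w2c[:3, :3] = orientation
    w2c[:3, 3] = -orientation @ position  # translation t = -R p for a camera centered at p
    return w2c
```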
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
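Before the normalization code below, a note on the depth handling above: the max-depth clamp is a robustness heuristic that caps every depth at 2.5x the median of the per-frame maxima, so a handful of spurious far values cannot stretch the scene scale. A small sketch of the same rule, assuming `depths` is a (T, H, W) tensor:

```python
import torch

def robust_depth_clamp(depths: torch.Tensor, factor: float = 2.5) -> torch.Tensor:
    """Cap depths at factor x the median of per-frame maxima (sketch of the heuristic above)."""
    per_frame_max = depths.reshape(depths.shape[0], -1).max(dim=1).values  # (T,)
    max_depth = per_frame_max.median() * factor
    return depths.clamp(0.0, max_depth.item())
```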
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
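Before the 2D track loading below: the normalization just computed rotates the scene so that the mean camera up-axis (the negated second rows of the w2cs, averaged over frames) maps to world +z, encoding the rotation as axis-angle for `roma.rotvec_to_rotmat`. A sketch of that construction, with an acos clamp added for numerical safety (the clamp is ours):

```python
import roma
import torch
import torch.nn.functional as F

def up_alignment_rotmat(original_up: torch.Tensor, target_up: torch.Tensor) -> torch.Tensor:
    """Rotation matrix sending unit vector original_up onto unit vector target_up."""
    axis = F.normalize(torch.linalg.cross(original_up, target_up), dim=-1)
    angle = torch.acos(torch.clamp(torch.dot(original_up, target_up), -1.0, 1.0))
    return roma.rotvec_to_rotmat(axis * angle)  # axis-angle -> 3x3 rotation
```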
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
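Before the color sampling below: the two einsums above implement the standard pinhole lift, padding pixels to homogeneous coordinates, mapping through K^-1 to rays, scaling by the sampled depth, then transforming to world space with the camera-to-world matrix. A compact standalone sketch (shapes as commented; helper name is ours):

```python
import torch
import torch.nn.functional as F

def unproject_tracks(tracks_2d, depths, inv_Ks, c2ws):
    """Lift pixel tracks (T, N, 2) with per-track depths (T, N) to world points (T, N, 3)."""
    rays = torch.einsum("tij,tnj->tni", inv_Ks, F.pad(tracks_2d, (0, 1), value=1.0))
    pts_cam = rays * depths[..., None]           # scale unit-depth rays by sampled depth
    pts_hom = F.pad(pts_cam, (0, 1), value=1.0)  # to homogeneous coordinates
    return torch.einsum("tij,tnj->tni", c2ws, pts_hom)[..., :3]
```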
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
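On the keypoint pairing above: the view scores every ordered pair of annotated frames, and `itertools.product(..., repeat=2)` enumerates them, identity pairs included. For example (frame ids are hypothetical):

```python
from itertools import product

import torch

time_ids = torch.tensor([0, 5, 10])  # hypothetical annotated frame ids
time_pairs = torch.tensor(list(product(time_ids.tolist(), repeat=2)))
print(time_pairs.shape)  # torch.Size([9, 2]) -- (0,0), (0,5), ..., (10,10)
```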
- self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/data/utils.py b/som_out/bear/code/2024-10-26-005843/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. - - Args: - occlusions: [-inf, inf], np.float32 - expected_dist: [-inf, inf], np.float32 - - Returns: - visibles: bool - """ - - def sigmoid(x): - if isinstance(x, np.ndarray): - return 1 / (1 + np.exp(-x)) - else: - return torch.sigmoid(x) - - visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5 - return visibles - - -def parse_tapir_track_info(occlusions, expected_dist): - """ - return: - valid_visible: mask of visible & confident points - valid_invisible: mask of invisible & confident points - confidence: clamped confidence scores (all < 0.5 -> 0) - """ - visibility = 1 - F.sigmoid(occlusions) - confidence = 1 - F.sigmoid(expected_dist) - valid_visible = visibility * confidence > 0.5 - valid_invisible = (1 - visibility) * confidence > 0.5 - # set all confidence < 0.5 to 0 - confidence = confidence * (valid_visible | valid_invisible).float() - return valid_visible, valid_invisible, confidence - - -def get_tracks_3d_for_query_frame( - query_index: int, - query_img: torch.Tensor, - tracks_2d: torch.Tensor, - depths: torch.Tensor, - masks: torch.Tensor, - inv_Ks: torch.Tensor, - c2ws: torch.Tensor, -): - """ - :param query_index (int) - :param query_img [H, W, 3] - :param tracks_2d [N, T, 4] - :param depths [T, H, W] - :param masks [T, H, W] - :param inv_Ks [T, 3, 3] - :param c2ws [T, 4, 4] - returns ( - tracks_3d [N, T, 3] - track_colors [N, 3] - visibles [N, T] - invisibles [N, T] - confidences [N, T] - ) - """ - T, H, W = depths.shape - query_img = query_img[None].permute(0, 3, 1, 2) # (1, 3, H, W) - tracks_2d = tracks_2d.swapaxes(0, 1) # (T, N, 4) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - # (T, N), (T, N), (T, N) - visibles, 
invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - track_depths = F.grid_sample( - depths[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths[..., None] - ) - tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[ - ..., :3 - ] - # Filter out out-of-mask tracks. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - is_in_masks = ( - F.grid_sample( - masks[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - )[:, 0, 0] - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - - # valid if in the fg mask at least 40% of the time - # in_mask_counts = is_in_masks.sum(0) - # t = 0.25 - # thresh = min(t * T, in_mask_counts.float().quantile(t).item()) - # valid = in_mask_counts > thresh - valid = is_in_masks[query_index] - # valid if visible 5% of the time - visible_counts = visibles.sum(0) - valid = valid & ( - visible_counts - >= min( - int(0.05 * T), - visible_counts.float().quantile(0.1).item(), - ) - ) - - # Get track's color from the query frame. - # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. 
Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. - If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. 
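Before the GaussianShader helpers: the median utilities above rely on an im2col-style trick, where a (K*K)x1xKxK binary kernel gathers each KxK window into K*K channels via `conv2d`, so the median becomes a reduction over the channel axis. A toy demonstration of that gather-then-reduce pattern:

```python
import torch
import torch.nn.functional as F

k = 3
kernel = torch.eye(k * k).view(k * k, 1, k, k)  # one 1 per channel: binary gather kernel
img = torch.arange(25.0).view(1, 1, 5, 5)
windows = F.conv2d(img, kernel, padding=1)      # (1, 9, 5, 5): each channel is one window pixel
median = windows.median(dim=1).values           # 3x3 median filter (zero-padded borders)
```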
-def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] - ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/init_utils.py b/som_out/bear/code/2024-10-26-005843/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from 
flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes -from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - -def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor -) -> GaussianParams: - """ - using dataclasses individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. - opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - -def init_bg( - points: StaticObservations, -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkdg_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkdg_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals. 
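Just before the normal-based orientation init below: both initializers above set per-Gaussian scales the same way, as the mean distance to the three nearest neighbors, stored in log space and repeated across the three axes so each Gaussian starts isotropic at roughly the local point spacing. A sketch of that recipe using the same sklearn-backed k-NN idea as the `knn` helper here (standalone; names are ours):

```python
import numpy as np
import torch
from sklearn.neighbors import NearestNeighbors

def knn_log_scales(xyz: torch.Tensor, k: int = 3) -> torch.Tensor:
    """Isotropic log-scales from mean k-NN distance; xyz is (N, 3), returns (N, 3)."""
    pts = xyz.detach().cpu().numpy()
    dists, _ = NearestNeighbors(n_neighbors=k + 1).fit(pts).kneighbors(pts)
    mean_d = dists[:, 1:].mean(axis=-1, keepdims=True)  # drop the self-distance column
    return torch.log(torch.from_numpy(mean_d.astype(np.float32)).repeat(1, 3))
```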
- local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t - 
procrustes_weights = ( - weights[cano_t] - * weights[cur_t] - * (confidences[cano_t] + confidences[cur_t]) - / 2 - ) - if procrustes_weights.sum() < min_mean_weight * num_frames: - init_rots[n, cur_t] = init_rots[n, prev_t] - init_ts[n, cur_t] = init_ts[n, prev_t] - cluster_skip_ts.append(cur_t) - else: - se3, (err, err_before) = solve_procrustes( - cluster[cano_t], - cluster[cur_t], - weights=procrustes_weights, - enforce_se3=True, - rot_type=rot_type, - ) - init_rot, init_t, _ = se3 - assert init_rot.shape[-1] == rot_dim - # double cover - if rot_type == "quat" and torch.linalg.norm( - init_rot - init_rots[n][prev_t] - ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]): - init_rot = -init_rot - init_rots[n, cur_t] = init_rot - init_ts[n, cur_t] = init_t - if np.isnan(err): - print(f"{cur_t=} {err=}") - print(f"{procrustes_weights.isnan().sum()=}") - if np.isnan(err_before): - print(f"{cur_t=} {err_before=}") - print(f"{procrustes_weights.isnan().sum()=}") - errs_after[n, cur_t] = err - errs_before[n, cur_t] = err_before - prev_t = cur_t - skipped_ts[cluster_id.item()] = cluster_skip_ts - - guru.info(f"{skipped_ts=}") - guru.info( - "procrustes init median error: {:.5f} => {:.5f}".format( - np.median(errs_before[errs_before > 0]), - np.median(errs_after[errs_after > 0]), - ) - ) - guru.info( - "procrustes init mean error: {:.5f} => {:.5f}".format( - np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0]) - ) - ) - guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}") - - if vis: - server = get_server(port) - center_idcs = torch.argmin(dists2centers, dim=0) - print(f"{dists2centers.shape=} {center_idcs.shape=}") - vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs]) - vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks") - import ipdb - - ipdb.set_trace() - - bases = MotionBases(init_rots, init_ts) - return bases, motion_coefs, tracks_3d - - -def run_initial_optim( - fg: GaussianParams, - bases: MotionBases, - tracks_3d: TrackObservations, - Ks: torch.Tensor, - w2cs: torch.Tensor, - num_iters: int = 1000, - use_depth_range_loss: bool = False, -): - """ - :param motion_rots: [num_bases, num_frames, 4|6] - :param motion_transls: [num_bases, num_frames, 3] - :param motion_coefs: [num_bases, num_frames] - :param means: [num_gaussians, 3] - """ - optimizer = torch.optim.Adam( - [ - {"params": bases.params["rots"], "lr": 1e-2}, - {"params": bases.params["transls"], "lr": 3e-2}, - {"params": fg.params["motion_coefs"], "lr": 1e-2}, - {"params": fg.params["means"], "lr": 1e-3}, - ], - ) - scheduler = torch.optim.lr_scheduler.ExponentialLR( - optimizer, gamma=0.1 ** (1 / num_iters) - ) - G = fg.params["means"].shape[0] - num_frames = bases.num_frames - device = bases.params["rots"].device - - w_smooth_func = lambda i, min_v, max_v, th: ( - min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v - ) - - gt_2d, gt_depth = project_2d_tracks( - tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - # (G, T, 2) - gt_2d = gt_2d.swapaxes(0, 1) - # (G, T) - gt_depth = gt_depth.swapaxes(0, 1) - - ts = torch.arange(0, num_frames, device=device) - ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1)) # (3B,) - - pbar = tqdm(range(0, num_iters)) - for i in pbar: - coefs = fg.get_coefs() - transfms = bases.compute_transforms(ts, coefs) - positions = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 
1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. - w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = 
init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / 
(cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/loss_utils.py b/som_out/bear/code/2024-10-26-005843/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return 
torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - 
# acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/metrics.py b/som_out/bear/code/2024-10-26-005843/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/metrics.py +++ /dev/null @@ -1,313 +0,0 @@ -from typing import Literal - -import numpy as np -import torch -import torch.nn.functional as F -from torchmetrics.functional.image.lpips import _NoTrainLpips -from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure -from torchmetrics.metric import Metric -from torchmetrics.utilities import dim_zero_cat -from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE - - -def compute_psnr( - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, -) -> float: - """ - Args: - preds (torch.Tensor): (..., 3) predicted images in [0, 1]. - targets (torch.Tensor): (..., 3) target images in [0, 1]. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - - Returns: - psnr (float): Peak signal-to-noise ratio. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - return ( - -10.0 - * torch.log( - F.mse_loss( - preds * masks[..., None], - targets * masks[..., None], - reduction="sum", - ) - / masks.sum().clamp(min=1.0) - / 3.0 - ) - / np.log(10.0) - ).item() - - -def compute_pose_errors( - preds: torch.Tensor, targets: torch.Tensor -) -> tuple[float, float, float]: - """ - Args: - preds: (N, 4, 4) predicted camera poses. - targets: (N, 4, 4) target camera poses. - - Returns: - ate (float): Absolute trajectory error. - rpe_t (float): Relative pose error in translation. - rpe_r (float): Relative pose error in rotation (degree). - """ - # Compute ATE. - ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item() - # Compute RPE_t and RPE_r. - # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r. - # torch has numerical issues for acos when the value is close to 1.0, i.e. - # RPE_r is supposed to be very small, and will result in artificially large - # error. 
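The numerical caveat in the note above is easy to reproduce. A standalone sketch (not part of the original file): float32 acos collapses a small relative rotation to zero, while the float64 path used below recovers it.

import numpy as np
import torch

theta = 1e-4  # ground-truth relative rotation angle in radians
trace = 1.0 + 2.0 * np.cos(theta)  # trace of a 3x3 rotation by theta
# (trace - 1) / 2 = cos(theta) ~ 1 - 5e-9, which float32 rounds to exactly 1.0,
# so acos returns 0.0; float64 recovers theta to ~5 significant digits.
rpe_f32 = torch.arccos(torch.tensor((trace - 1.0) / 2.0, dtype=torch.float32))
rpe_f64 = np.arccos(np.clip((trace - 1.0) / 2.0, -1.0, 1.0))
print(rpe_f32.item(), rpe_f64)  # 0.0 vs ~9.9998e-05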
-    preds = preds.detach().cpu().numpy()
-    targets = targets.detach().cpu().numpy()
-    pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:]
-    target_rels = np.linalg.inv(targets[:-1]) @ targets[1:]
-    error_rels = np.linalg.inv(target_rels) @ pred_rels
-    traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1)
-    rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item()
-    rpe_r = (
-        np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item()
-        / np.pi
-        * 180.0
-    )
-    return ate, rpe_t, rpe_r
-
-
-class mPSNR(PeakSignalNoiseRatio):
-    sum_squared_error: list[torch.Tensor]
-    total: list[torch.Tensor]
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            data_range=1.0,
-            base=10.0,
-            dim=None,
-            reduction="elementwise_mean",
-            **kwargs,
-        )
-        self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat")
-        self.add_state("total", default=[], dist_reduce_fx="cat")
-
-    def __len__(self) -> int:
-        return len(self.total)
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (..., 3) float32 predicted images.
-            targets (torch.Tensor): (..., 3) float32 target images.
-            masks (torch.Tensor | None): (...,) optional binary masks where the
-                1-regions will be taken into account.
-        """
-        if masks is None:
-            masks = torch.ones_like(preds[..., 0])
-        self.sum_squared_error.append(
-            torch.sum(torch.pow((preds - targets) * masks[..., None], 2))
-        )
-        self.total.append(masks.sum().to(torch.int64) * 3)
-
-    def compute(self) -> torch.Tensor:
-        """Compute peak signal-to-noise ratio over state."""
-        sum_squared_error = dim_zero_cat(self.sum_squared_error)
-        total = dim_zero_cat(self.total)
-        return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0)
-
-
-class mSSIM(StructuralSimilarityIndexMeasure):
-    similarity: list
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            reduction=None,
-            data_range=1.0,
-            return_full_image=False,
-            **kwargs,
-        )
-        assert isinstance(self.sigma, float)
-
-    def __len__(self) -> int:
-        return sum([s.shape[0] for s in self.similarity])
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (B, H, W, 3) float32 predicted images.
-            targets (torch.Tensor): (B, H, W, 3) float32 target images.
-            masks (torch.Tensor | None): (B, H, W) optional binary masks where
-                the 1-regions will be taken into account.
-        """
-        if masks is None:
-            masks = torch.ones_like(preds[..., 0])
-
-        # Construct a 1D Gaussian blur filter.
-        assert isinstance(self.kernel_size, int)
-        hw = self.kernel_size // 2
-        shift = (2 * hw - self.kernel_size + 1) / 2
-        assert isinstance(self.sigma, float)
-        f_i = (
-            (torch.arange(self.kernel_size, device=preds.device) - hw + shift)
-            / self.sigma
-        ) ** 2
-        filt = torch.exp(-0.5 * f_i)
-        filt /= torch.sum(filt)
-
-        # Blur in x and y (faster than the 2D convolution).
-        def convolve2d(z, m, f):
-            # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf).
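The convolve2d helper whose body follows implements a masked blur: both z*m and m are blurred with the same kernel, and the ratio renormalizes so masked-out pixels do not dilute local statistics. A 1D sketch of the idea (illustrative, not from the original file):

import torch
import torch.nn.functional as F

z = torch.tensor([[1.0, 1.0, 5.0, 1.0, 1.0]])  # the 5.0 is an invalid pixel
m = torch.tensor([[1.0, 1.0, 0.0, 1.0, 1.0]])  # mask zeroes it out
k = torch.ones(1, 1, 3) / 3  # box kernel standing in for the Gaussian
num = F.conv1d((z * m)[None], k, padding=1)[0]  # blurred masked signal
den = F.conv1d(m[None], k, padding=1)[0]        # blurred mask
blurred = num / den.clamp(min=1e-8)
# the masked position is filled from its valid neighbors; the 5.0 never leaks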
- z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. - # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. 
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. - """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/params.py b/som_out/bear/code/2024-10-26-005843/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if 
f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] 
== 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/renderer.py b/som_out/bear/code/2024-10-26-005843/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. - work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/scene_model.py b/som_out/bear/code/2024-10-26-005843/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def 
__init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = 
GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). - target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = 
colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. - self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/tensor_dataclass.py b/som_out/bear/code/2024-10-26-005843/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
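Because map also recurses into lists, tuples, and dicts, __getitem__ and to() move or slice every tensor field of a dataclass in lockstep. A hedged usage sketch with made-up values, using the TrackObservations dataclass defined just below:

import torch

obs = TrackObservations(
    xyz=torch.randn(8, 4, 3),
    visibles=torch.ones(8, 4, dtype=torch.bool),
    invisibles=torch.zeros(8, 4, dtype=torch.bool),
    confidences=torch.ones(8, 4),
    colors=torch.rand(8, 3),
)
assert obs.check_sizes()
subset = obs[:2]       # __getitem__ maps x[:2] over every tensor field
moved = obs.to("cpu")  # to() maps x.to(device) over every tensor field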
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/trainer.py b/som_out/bear/code/2024-10-26-005843/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
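The pattern here is concatenate-then-split: target timesteps from all B batch items are flattened so the motion bases are evaluated in a single call, then the poses are split back into per-item chunks of N. A shape-level sketch with illustrative sizes (not from the original file):

import torch

B, N, G = 2, 3, 5
target_ts = [torch.arange(N), torch.arange(N) + 1]       # [(N,), (N,)]
target_ts_vec = torch.cat(target_ts)                     # (B * N,)
target_means = torch.randn(G, B * N, 3)                  # stand-in for compute_poses_all
target_mean_list = target_means.transpose(0, 1).split(N) # B chunks of (N, G, 3)
assert len(target_mean_list) == B and target_mean_list[0].shape == (N, G, 3)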
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
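The w_interval factor above exponentially down-weights supervision from temporally distant target frames, so confident, nearby correspondences dominate the track loss. A quick numeric sketch (illustrative values):

import torch

num_frames = 80
frame_intervals = torch.tensor([0.0, 1.0, 10.0, 40.0])
w_interval = torch.exp(-2 * frame_intervals / num_frames)
# tensor([1.0000, 0.9753, 0.7788, 0.3679]) -- a 40-frame gap keeps ~37% weight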
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
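Both smoothness terms above penalize the discrete second derivative, accel_t = 2 x_t - x_{t-1} - x_{t+1}, so constant-velocity motion is free and only acceleration is taxed. A minimal check (not from the original file):

import torch

x = torch.cumsum(torch.ones(1, 6, 3), dim=1)  # perfectly linear trajectory
accel = 2 * x[:, 1:-1] - x[:, :-2] - x[:, 2:]
assert accel.abs().max() == 0.0  # constant velocity incurs zero penalty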
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
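The scheduling logic in run_control_steps above interleaves densification, culling, and opacity resets on a fixed cadence. A walk-through sketch with made-up config values (these are illustrative, not read from the actual config):

control_every, warmup_steps, stop_control_steps = 100, 200, 4000
reset_opacity_every = 30 * control_every  # 30 controls per opacity reset
num_frames = 80  # illustrative

for step in (300, 3000):
    if warmup_steps < step < stop_control_steps and step % control_every == 0:
        densify = step % reset_opacity_every > num_frames
        cull = step % reset_opacity_every > min(3 * num_frames, 1000)
        reset = step % reset_opacity_every == 0
        print(step, densify, cull, reset)
# step 300: densify and cull both fire; step 3000: only the opacity reset fires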
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/trajectories.py b/som_out/bear/code/2024-10-26-005843/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1)
-    y = torch.cross(z, x, dim=-1)  # (3)
-    avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center)
-    avg_w2c = torch.linalg.inv(avg_c2w)
-    return avg_w2c
-
-
-def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor:
-    """Triangulate a set of rays to find a single lookat point.
-
-    Args:
-        origins (torch.Tensor): A (N, 3) array of ray origins.
-        viewdirs (torch.Tensor): A (N, 3) array of ray view directions.
-
-    Returns:
-        torch.Tensor: A (3,) lookat point.
-    """
-
-    viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1)
-    eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None]
-    # Calculate projection matrix I - rr^T.
-    I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :])
-    # Compute sum of projections.
-    sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3)
-    # Solve for the intersection point using least squares.
-    lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0]
-    # Check NaNs.
-    assert not torch.any(torch.isnan(lookat))
-    return lookat
-
-
-def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor):
-    """
-    Args:
-        positions: (N, 3) tensor of camera positions
-        lookat: (3,) tensor of lookat point
-        up: (3,) tensor of up vector
-
-    Returns:
-        w2cs: (N, 4, 4) tensor of world-to-camera transforms
-    """
-    forward_vectors = F.normalize(lookat - positions, dim=-1)
-    right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1)
-    down_vectors = F.normalize(
-        torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1
-    )
-    Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1)
-    w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions))
-    return w2cs
-
-
-def get_arc_w2cs(
-    ref_w2c: torch.Tensor,
-    lookat: torch.Tensor,
-    up: torch.Tensor,
-    num_frames: int,
-    degree: float,
-    **_,
-) -> torch.Tensor:
-    ref_position = torch.linalg.inv(ref_w2c)[:3, 3]
-    thetas = (
-        torch.sin(
-            torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[
-                :-1
-            ]
-        )
-        * (degree / 2.0)
-        / 180.0
-        * torch.pi
-    )
-    positions = torch.einsum(
-        "nij,j->ni",
-        roma.rotvec_to_rotmat(thetas[:, None] * up[None]),
-        ref_position - lookat,
-    )
-    return get_lookat_w2cs(positions, lookat, up)
-
-
-def get_lemniscate_w2cs(
-    ref_w2c: torch.Tensor,
-    lookat: torch.Tensor,
-    up: torch.Tensor,
-    num_frames: int,
-    degree: float,
-    **_,
-) -> torch.Tensor:
-    ref_c2w = torch.linalg.inv(ref_w2c)
-    a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi)
-    # Lemniscate curve in camera space. Starting at the origin.
-    thetas = (
-        torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1]
-        + torch.pi / 2
-    )
-    positions = torch.stack(
-        [
-            a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2),
-            a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2),
-            torch.zeros(num_frames, device=ref_w2c.device),
-        ],
-        dim=-1,
-    )
-    # Transform to world space.
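# The step below lifts the camera-frame curve to homogeneous coordinates and
# applies the top three rows of the reference camera-to-world matrix. A
# minimal standalone sketch of the same pattern, with assumed shapes:
#
#     import torch
#     import torch.nn.functional as F
#
#     c2w = torch.eye(4)                                     # (4, 4) camera-to-world
#     pts_cam = torch.randn(24, 3)                           # (N, 3) camera-frame points
#     pts_h = F.pad(pts_cam, (0, 1), value=1.0)              # (N, 4) homogeneous
#     pts_world = torch.einsum("ij,nj->ni", c2w[:3], pts_h)  # (N, 3) world-frame points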
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/transforms.py b/som_out/bear/code/2024-10-26-005843/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
-
-    Returns:
-        torch.Tensor: (..., 4, 4)
-    """
-    mat34 = torch.cat([R, t[..., None]], dim=-1)
-    if s is None:
-        bottom = (
-            mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]])
-            .reshape((1,) * (mat34.dim() - 2) + (1, 4))
-            .expand(mat34.shape[:-2] + (1, 4))
-        )
-    else:
-        bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0)
-    mat4 = torch.cat([mat34, bottom], dim=-2)
-    return mat4
-
-
-def rmat_to_cont_6d(matrix):
-    """
-    :param matrix (*, 3, 3)
-    :returns 6d vector (*, 6)
-    """
-    return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1)
-
-
-def cont_6d_to_rmat(cont_6d):
-    """
-    :param 6d vector (*, 6)
-    :returns matrix (*, 3, 3)
-    """
-    x1 = cont_6d[..., 0:3]
-    y1 = cont_6d[..., 3:6]
-
-    x = F.normalize(x1, dim=-1)
-    y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1)
-    z = torch.linalg.cross(x, y, dim=-1)
-
-    return torch.stack([x, y, z], dim=-1)
-
-
-def solve_procrustes(
-    src: torch.Tensor,
-    dst: torch.Tensor,
-    weights: torch.Tensor | None = None,
-    enforce_se3: bool = False,
-    rot_type: Literal["quat", "mat", "6d"] = "quat",
-):
-    """
-    Solve the Procrustes problem to align two point clouds, by solving the
-    following problem:
-
-    min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1.
-
-    Args:
-        src (torch.Tensor): (N, 3).
-        dst (torch.Tensor): (N, 3).
-        weights (torch.Tensor | None): (N,), optional weights for alignment.
-        enforce_se3 (bool): Whether to enforce the transform to be SE(3),
-            i.e. to fix the scale to 1.
-
-    Returns:
-        sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]):
-            rot (torch.Tensor): rotation component, as a (4,) WXYZ quaternion,
-                a (3, 3) matrix, or a (6,) continuous 6D vector depending on
-                `rot_type`.
-            t (torch.Tensor): (3,), translation component.
-            s (torch.Tensor): (), scale component.
-        error (tuple[float, float]): weighted average L2 distances after and
-            before alignment.
-    """
-    # Compute weights.
-    if weights is None:
-        weights = src.new_ones(src.shape[0])
-    weights = weights[:, None] / weights.sum()
-    # Normalize point positions.
-    src_mean = (src * weights).sum(dim=0)
-    dst_mean = (dst * weights).sum(dim=0)
-    src_cent = src - src_mean
-    dst_cent = dst - dst_mean
-    # Normalize point scales.
-    if not enforce_se3:
-        src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt()
-        dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt()
-    else:
-        src_scale = dst_scale = src.new_tensor(1.0)
-    src_scaled = src_cent / src_scale
-    dst_scaled = dst_cent / dst_scale
-    # Compute the matrix for the singular value decomposition (SVD).
-    matrix = (weights * dst_scaled).T @ src_scaled
-    U, _, Vh = torch.linalg.svd(matrix)
-    # Special reflection case.
-    S = torch.eye(3, device=src.device)
-    if torch.det(U) * torch.det(Vh) < 0:
-        S[2, 2] = -1
-    R = U @ S @ Vh
-    # Compute the transformation.
-    if rot_type == "quat":
-        rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1)
-    elif rot_type == "6d":
-        rot = rmat_to_cont_6d(R)
-    else:
-        rot = R
-    s = dst_scale / src_scale
-    t = dst_mean / s - src_mean @ R.T
-    sim3 = rot, t, s
-    # Debug: error.
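# A quick sanity check for solve_procrustes, as a sketch: build a known
# similarity transform, recover it, and expect near-zero error (this assumes
# roma's random_rotmat for the ground-truth rotation):
#
#     import roma
#     import torch
#
#     src = torch.randn(256, 3)
#     R_gt = roma.random_rotmat()
#     dst = 1.5 * (src @ R_gt.T + torch.tensor([0.1, -0.2, 0.3]))
#     (rot, t, s), (err, err_before) = solve_procrustes(src, dst, rot_type="mat")
#     # err should be ~0 and s close to 1.5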
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/validator.py b/som_out/bear/code/2024-10-26-005843/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
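# PCK below counts a predicted keypoint as correct when it lands within 5% of
# the longer image side of its target; the underlying rule is roughly:
#
#     dists = (pred_keypoints - target_keypoints).norm(dim=-1)  # (P,)
#     pck = (dists < max(img_wh) * 0.05).float().mean()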
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
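# Each video frame is the ground-truth image and the rendering concatenated
# side by side before being moved to host memory, i.e. roughly
# torch.cat([gt_img, pred_img], dim=1).cpu() with shape (H, 2 * W, 3).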
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
-            t = batch["ts"][0]
-            # (4, 4).
-            w2c = batch["w2cs"][0]
-            # (3, 3).
-            K = batch["Ks"][0]
-            # (H, W, 3).
-            img = batch["imgs"][0]
-            img_wh = img.shape[-2::-1]
-            rendered = self.model.render(
-                t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors
-            )
-            # Put results onto the CPU, since accumulating them on the GPU
-            # would otherwise consume an unnecessarily large amount of memory
-            # for long sequences.
-            video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu())
-        video = torch.stack(video, dim=0)
-        iio.mimwrite(
-            osp.join(video_dir, "motion_coefs.mp4"),
-            make_video_divisble((video.numpy() * 255).astype(np.uint8)),
-            fps=fps,
-        )
diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/vis/__init__.py b/som_out/bear/code/2024-10-26-005843/flow3d/vis/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/vis/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc
deleted file mode 100644
index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/vis/__pycache__/render_panel.cpython-310.pyc
deleted file mode 100644
index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/vis/__pycache__/utils.cpython-310.pyc
deleted file mode 100644
index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/bear/code/2024-10-26-005843/flow3d/vis/__pycache__/viewer.cpython-310.pyc
deleted file mode 100644
index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-005843/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/vis/playback_panel.py b/som_out/bear/code/2024-10-26-005843/flow3d/vis/playback_panel.py
deleted file mode 100644
index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000
--- a/som_out/bear/code/2024-10-26-005843/flow3d/vis/playback_panel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import threading
-import time
-
-import viser
-
-
-def add_gui_playback_group(
-    server: viser.ViserServer,
-    num_frames: int,
-    min_fps: float = 1.0,
-    max_fps: float = 60.0,
-    fps_step: float = 0.1,
-    initial_fps: float = 10.0,
-):
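# This helper wires up a timestep slider with frame-step buttons and
# play/pause controls; a daemon thread advances the slider at the chosen FPS
# while playing. A typical call site might look like (a sketch; the server
# setup is assumed):
#
#     server = viser.ViserServer()
#     gui_handles = add_gui_playback_group(server, num_frames=80, initial_fps=15.0)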
gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/vis/render_panel.py b/som_out/bear/code/2024-10-26-005843/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
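# This module implements the camera-path render tab: keyframes captured from
# live viser cameras are interpolated with Kochanek-Bartels splines and can
# be exported to JSON. A keyframe can also be added programmatically, along
# the lines of (a sketch; `client` is an assumed connected viser client and
# `camera_path` a CameraPath instance):
#
#     kf = Keyframe.from_camera(time=0.0, camera=client.camera, aspect=16 / 9)
#     camera_path.add_camera(kf)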
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
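# Passing an existing `keyframe_index` overwrites that slot in
# self._keyframes; the edit-panel callbacks below rely on this to rebuild a
# keyframe's frustum in place after its FOV is changed.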
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
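# Without the atomic block below, orientation and position would be sent as
# separate messages, and a frame could briefly render with the new rotation
# but the old translation, making the fly-to animation jitter.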
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
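# PCHIP keeps the seconds-to-keyframe-index mapping monotone even with very
# uneven per-transition durations, e.g. (standalone sketch):
#
#     import numpy as np
#     import scipy.interpolate
#
#     x = np.array([0.0, 2.0, 2.5, 6.0])  # cumulative transition times
#     f = scipy.interpolate.PchipInterpolator(x, np.arange(4))
#     assert np.all(np.diff(f(np.linspace(0.0, 6.0, 100))) >= 0)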
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
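# Besides the pose, the keyframe records the current scene timestep (when the
# playback panel is present), so exported paths can replay dynamic content in
# sync with the camera motion.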
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
Returns current camera pose + FOV if available."""
-
-        if preview_frame_slider is None:
-            return
-        maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad(
-            preview_frame_slider.value / get_max_frame_index()
-        )
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-        render_tab_state.preview_fov = fov_rad
-        render_tab_state.preview_aspect = camera_path.get_aspect()
-        render_tab_state.preview_camera_type = camera_type.value
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-        return pose, fov_rad, time
-
-    def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]:
-        """Helper for creating the current frame # slider. This is removed and
-        re-added anytime the `max` value changes."""
-
-        with playback_folder:
-            preview_frame_slider = server.gui.add_slider(
-                "Preview frame",
-                min=0,
-                max=get_max_frame_index(),
-                step=1,
-                initial_value=0,
-                # Place right after the pause button.
-                order=preview_render_stop_button.order + 0.01,
-                disabled=get_max_frame_index() == 1,
-            )
-            play_button.disabled = preview_frame_slider.disabled
-            preview_render_button.disabled = preview_frame_slider.disabled
-
-        @preview_frame_slider.on_update
-        def _(_) -> None:
-            nonlocal preview_camera_handle
-            maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-            if maybe_pose_and_fov_rad_and_time is None:
-                return
-            pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-
-            preview_camera_handle = server.scene.add_camera_frustum(
-                "/preview_camera",
-                fov=fov_rad,
-                aspect=resolution.value[0] / resolution.value[1],
-                scale=0.35,
-                wxyz=pose.rotation().wxyz,
-                position=pose.translation(),
-                color=(10, 200, 30),
-            )
-            if render_tab_state.preview_render:
-                for client in server.get_clients().values():
-                    client.camera.wxyz = pose.rotation().wxyz
-                    client.camera.position = pose.translation()
-                if gui_timestep_handle is not None:
-                    gui_timestep_handle.value = int(time)
-
-        return preview_frame_slider
-
-    # We back up the camera poses when we start previewing renders, and
-    # restore them when the preview ends.
-    camera_pose_backup_from_id: Dict[int, tuple] = {}
-
-    @preview_render_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = True
-        preview_render_button.visible = False
-        preview_render_stop_button.visible = True
-
-        maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov, time = maybe_pose_and_fov_rad_and_time
-        del fov
-
-        # Hide all scene nodes when we're previewing the render.
-        server.scene.set_global_visibility(False)
-
-        # Back up and then set camera poses.
-        for client in server.get_clients().values():
-            camera_pose_backup_from_id[client.client_id] = (
-                client.camera.position,
-                client.camera.look_at,
-                client.camera.up_direction,
-            )
-            client.camera.wxyz = pose.rotation().wxyz
-            client.camera.position = pose.translation()
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-
-    @preview_render_stop_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = False
-        preview_render_button.visible = True
-        preview_render_stop_button.visible = False
-
-        # Revert camera poses.
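# The backup taken on preview start stored (position, look_at, up_direction)
# per client id; restoring all three puts each user's orbit controls back
# exactly where they were before the preview took over the camera.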
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera() # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play).start() - - # Play the camera trajectory when the play button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # add button for loading existing path - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - camera_path_dir = datapath.parent - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # load the json file - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # apply the x rotation by 180 deg - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_Fov values will not be exactly matched. 
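# In other words, a keyframe is treated as having an FOV override only when
# its saved FOV differs from the file's default_fov by more than 1e-3 degrees.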
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/vis/utils.py b/som_out/bear/code/2024-10-26-005843/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, occ, radius=radius) 
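        # draw_keypoints_cv2 (defined below) draws filled circles for visible
        # keypoints (occ == 0) and thin outlines for occluded ones.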
- out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
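    Each point becomes a small disc: num_segments rim vertices plus the point
    itself appended as the fan center, triangulated as a fan.

    Example (illustrative; shapes follow from the formulas above):
        >>> pts = torch.zeros(2, 2)
        >>> verts, faces = generate_point_verts_faces(pts, point_size=4, num_segments=10)
        >>> verts.shape, faces.shape
        (torch.Size([22, 2]), torch.Size([20, 3]))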
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
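    Depth is min-max normalized to [0, 1] before the turbo colormap is
    applied; where acc < 1 the colored depth is blended toward white.

    Example (illustrative):
        >>> depth = torch.rand(4, 4, 1)
        >>> apply_depth_colormap(depth).shape
        torch.Size([4, 4, 3])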
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/bear/code/2024-10-26-005843/flow3d/vis/viewer.py b/som_out/bear/code/2024-10-26-005843/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/bear/code/2024-10-26-005843/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/bear/code/2024-10-26-005843/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/bear/code/2024-10-26-005843/scripts/evaluate_iphone.py b/som_out/bear/code/2024-10-26-005843/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005843/scripts/evaluate_iphone.py +++ /dev/null @@ 
-1,447 +0,0 @@ -import argparse -import json -import os.path as osp -from glob import glob -from itertools import product - -import cv2 -import imageio.v3 as iio -import numpy as np -import roma -import torch -from tqdm import tqdm - -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.metrics import mLPIPS, mPSNR, mSSIM -from flow3d.transforms import rt_to_mat4, solve_procrustes - -parser = argparse.ArgumentParser() -parser.add_argument( - "--data_dir", - type=str, - help="Path to the data directory that contains all the sequences.", -) -parser.add_argument( - "--result_dir", - type=str, - help="Path to the result directory that contains the results." - "for batch evaluation, result_dir should contain subdirectories for each sequence. (result_dir/seq_name/results)" - "for single sequence evaluation, result_dir should contain results directly (result_dir/results)", -) -parser.add_argument( - "--seq_names", - type=str, - nargs="+", - default=[ - "apple", - "backpack", - "block", - "creeper", - "handwavy", - "haru-sit", - "mochi-high-five", - "paper-windmill", - "pillow", - "spin", - "sriracha-tree", - "teddy", - ], - help="Sequence names to evaluate.", -) -args = parser.parse_args() - - -def load_data_dict(data_dir, train_names, val_names): - val_imgs = np.array( - [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names] - ) - val_covisibles = np.array( - [ - iio.imread( - osp.join( - data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png" - ) - ) - for name in tqdm(val_names, desc="Loading val covisibles") - ] - ) - train_depths = np.array( - [ - np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0] - for name in train_names - ] - ) - train_Ks, train_w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [name + ".png" for name in train_names], - ) - train_Ks = train_Ks[:, :3, :3] - scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item() - train_c2ws = np.linalg.inv(train_w2cs) - train_c2ws[:, :3, -1] *= scale - train_w2cs = np.linalg.inv(train_c2ws) - keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json"))) - keypoints_2d = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints_2d.append(json.load(f)) - keypoints_2d = np.array(keypoints_2d) - keypoints_2d[..., :2] *= 2.0 - time_ids = np.array( - [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths] - ) - time_pairs = np.array(list(product(time_ids, repeat=2))) - index_pairs = np.array(list(product(range(len(time_ids)), repeat=2))) - keypoints_3d = [] - for i, kps_2d in zip(time_ids, keypoints_2d): - K = train_Ks[i] - w2c = train_w2cs[i] - depth = train_depths[i] - is_kp_visible = kps_2d[:, 2] == 1 - is_depth_valid = ( - cv2.remap( - (depth != 0).astype(np.float32), - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - == 1 - ) - kp_depths = cv2.remap( - depth, # type: ignore - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - kps_3d = ( - np.einsum( - "ij,pj->pi", - np.linalg.inv(K), - np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1), - ) - * kp_depths[:, None] - ) - kps_3d = np.einsum( - "ij,pj->pi", - np.linalg.inv(w2c)[:3], - np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1), - ) - kps_3d = np.concatenate( - [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1 - ) - 
kps_3d[kps_3d[:, -1] != 1] = 0.0
-        keypoints_3d.append(kps_3d)
-    keypoints_3d = np.array(keypoints_3d)
-    return {
-        "val_imgs": val_imgs,
-        "val_covisibles": val_covisibles,
-        "train_depths": train_depths,
-        "train_Ks": train_Ks,
-        "train_w2cs": train_w2cs,
-        "keypoints_2d": keypoints_2d,
-        "keypoints_3d": keypoints_3d,
-        "time_ids": time_ids,
-        "time_pairs": time_pairs,
-        "index_pairs": index_pairs,
-    }
-
-
-def load_result_dict(result_dir, val_names):
-    try:
-        pred_val_imgs = np.array(
-            [
-                iio.imread(osp.join(result_dir, "rgb", f"{name}.png"))
-                for name in val_names
-            ]
-        )
-    except Exception:
-        pred_val_imgs = None
-    try:
-        keypoints_dict = np.load(
-            osp.join(result_dir, "keypoints.npz"), allow_pickle=True
-        )
-        if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict:
-            keypoints_dict = keypoints_dict["arr_0"].item()
-        pred_keypoint_Ks = keypoints_dict["Ks"]
-        pred_keypoint_w2cs = keypoints_dict["w2cs"]
-        pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"]
-        pred_train_depths = keypoints_dict["pred_train_depths"]
-    except Exception:
-        print(
-            "No keypoints.npz found; this should only happen for methods that "
-            "cannot produce keypoints."
-        )
-        keypoints_dict = {}
-        pred_keypoint_Ks = None
-        pred_keypoint_w2cs = None
-        pred_keypoints_3d = None
-        pred_train_depths = None
-
-    if "visibilities" in keypoints_dict:
-        pred_visibilities = keypoints_dict["visibilities"]
-    else:
-        pred_visibilities = None
-
-    return {
-        "pred_val_imgs": pred_val_imgs,
-        "pred_train_depths": pred_train_depths,
-        "pred_keypoint_Ks": pred_keypoint_Ks,
-        "pred_keypoint_w2cs": pred_keypoint_w2cs,
-        "pred_keypoints_3d": pred_keypoints_3d,
-        "pred_visibilities": pred_visibilities,
-    }
-
-
-def evaluate_3d_tracking(data_dict, result_dict):
-    train_Ks = data_dict["train_Ks"]
-    train_w2cs = data_dict["train_w2cs"]
-    keypoints_3d = data_dict["keypoints_3d"]
-    time_ids = data_dict["time_ids"]
-    time_pairs = data_dict["time_pairs"]
-    index_pairs = data_dict["index_pairs"]
-    pred_keypoint_Ks = result_dict["pred_keypoint_Ks"]
-    pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"]
-    pred_keypoints_3d = result_dict["pred_keypoints_3d"]
-    if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks):
-        print("Inconsistent camera intrinsics.")
-        print(train_Ks[time_ids][0], pred_keypoint_Ks[0])
-    keypoint_w2cs = train_w2cs[time_ids]
-    q, t, s = solve_procrustes(
-        torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to(
-            torch.float32
-        ),
-        torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32),
-    )[0]
-    R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1))
-    pred_keypoints_3d = np.einsum(
-        "ij,...j->...i",
-        rt_to_mat4(R, t, s).numpy().astype(np.float64),
-        np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1),
-    )
-    pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:]
-    # Compute 3D tracking metrics.
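    # Over every ordered pair of keyframe timesteps, using only keypoints
    # covisible in both frames and the Sim(3)-aligned predictions from above:
    #   EPE: mean 3D end-point error, averaged within a pair and then across
    #        pairs that have at least one covisible keypoint;
    #   PCK: fraction of covisible keypoints with error below 0.1 / 0.05 in
    #        the rescaled (nominally metric) world units.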
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
None].repeat(num_pts, axis=-1) - evaluation_points = one_hot_eye == 0 - for i in range(num_frames): - evaluation_points[i, :, ~visibilities[i]] = False - occ_acc = np.sum( - np.equal(pred_visibilities, target_visibilities) & evaluation_points - ) / np.sum(evaluation_points) - all_frac_within = [] - all_jaccard = [] - - for thresh in [4, 8, 16, 32, 64]: - within_dist = np.sum( - np.square(pred_points - target_points), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, target_visibilities) - count_correct = np.sum(is_correct & evaluation_points) - count_visible_points = np.sum(target_visibilities & evaluation_points) - frac_correct = count_correct / count_visible_points - all_frac_within.append(frac_correct) - - true_positives = np.sum(is_correct & pred_visibilities & evaluation_points) - gt_positives = np.sum(target_visibilities & evaluation_points) - false_positives = (~target_visibilities) & pred_visibilities - false_positives = false_positives | ((~within_dist) & pred_visibilities) - false_positives = np.sum(false_positives & evaluation_points) - jaccard = true_positives / (gt_positives + false_positives) - all_jaccard.append(jaccard) - AJ = np.mean(all_jaccard) - APCK = np.mean(all_frac_within) - - print(f"2D tracking AJ: {AJ:.4f}") - print(f"2D tracking avg PCK: {APCK:.4f}") - print(f"2D tracking occlusion accuracy: {occ_acc:.4f}") - print("-----------------------------") - return AJ, APCK, occ_acc - - -def evaluate_nv(data_dict, result_dict): - device = "cuda" - psnr_metric = mPSNR().to(device) - ssim_metric = mSSIM().to(device) - lpips_metric = mLPIPS().to(device) - - val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device) - val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device) - pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device) - - for i in range(len(val_imgs)): - val_img = val_imgs[i] / 255.0 - pred_val_img = pred_val_imgs[i] / 255.0 - val_covisible = val_covisibles[i] / 255.0 - psnr_metric.update(val_img, pred_val_img, val_covisible) - ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - mpsnr = psnr_metric.compute().item() - mssim = ssim_metric.compute().item() - mlpips = lpips_metric.compute().item() - print(f"NV mPSNR: {mpsnr:.4f}") - print(f"NV mSSIM: {mssim:.4f}") - print(f"NV mLPIPS: {mlpips:.4f}") - return mpsnr, mssim, mlpips - - -if __name__ == "__main__": - seq_names = args.seq_names - - epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], [] - AJ_all, APCK_all, occ_acc_all = [], [], [] - mpsnr_all, mssim_all, mlpips_all = [], [], [] - - for seq_name in seq_names: - print("=========================================") - print(f"Evaluating {seq_name}") - print("=========================================") - data_dir = osp.join(args.data_dir, seq_name) - if not osp.exists(data_dir): - data_dir = args.data_dir - if not osp.exists(data_dir): - raise ValueError(f"Data directory {data_dir} not found.") - result_dir = osp.join(args.result_dir, seq_name, "results/") - if not osp.exists(result_dir): - result_dir = osp.join(args.result_dir, "results/") - if not osp.exists(result_dir): - raise ValueError(f"Result directory {result_dir} not found.") - - with open(osp.join(data_dir, "splits/train.json")) as f: - train_names = json.load(f)["frame_names"] - with open(osp.join(data_dir, "splits/val.json")) as f: - val_names = json.load(f)["frame_names"] - - data_dict = load_data_dict(data_dir, train_names, 
val_names) - result_dict = load_result_dict(result_dir, val_names) - if result_dict["pred_keypoints_3d"] is not None: - epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict) - AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict) - epe_all.append(epe) - pck_3d_10cm_all.append(pck_3d_10cm) - pck_3d_5cm_all.append(pck_3d_5cm) - AJ_all.append(AJ) - APCK_all.append(APCK) - occ_acc_all.append(occ_acc) - if len(data_dict["val_imgs"]) > 0: - if result_dict["pred_val_imgs"] is None: - print("No NV results found.") - continue - mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict) - mpsnr_all.append(mpsnr) - mssim_all.append(mssim) - mlpips_all.append(mlpips) - - print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}") - print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}") - print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}") - print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}") - print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}") - print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}") - print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}") - print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}") - print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}") diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/__init__.py b/som_out/bear/code/2024-10-26-005936/flow3d/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/configs.cpython-310.pyc deleted file mode 100644 index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/init_utils.cpython-310.pyc deleted file mode 100644 index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/loss_utils.cpython-310.pyc deleted file mode 100644 index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/metrics.cpython-310.pyc deleted file mode 100644 index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ diff --git 
a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/params.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/configs.py b/som_out/bear/code/2024-10-26-005936/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig - motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float = 
0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/data/__init__.py b/som_out/bear/code/2024-10-26-005936/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 index 0e12be124c07bc40149b0b8bcafd1413e94f8fd0..0000000000000000000000000000000000000000 Binary files 
a/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/colmap.cpython-310.pyc deleted file mode 100644 index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc deleted file mode 100644 index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/data/base_dataset.py b/som_out/bear/code/2024-10-26-005936/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... - - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ... 
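    # train_collate_fn (below) batches fixed-size fields with default_collate,
    # but keeps the per-sample track fields (query_tracks_2d and the target_*
    # keys) as plain lists, since the number of 2D tracks varies per frame and
    # those tensors cannot be stacked.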
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/data/casual_dataset.py b/som_out/bear/code/2024-10-26-005936/flow3d/data/casual_dataset.py deleted file mode 100644 index e378c6d2408c8ae5d2cdaf45cf6f77dead5c7bd2..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything" #"aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything", #"aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = 
depth_type - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return self.depths[index] / 
self.scale - - def load_image(self, index) -> torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, dtype=torch.float32), - 
torch.arange(0, H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/data/colmap.py b/som_out/bear/code/2024-10-26-005936/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = {v.name: k for k, v in 
images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
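    Example: the camera-header read below calls
    read_next_bytes(fid, 24, "iiQQ"), equivalent to
    struct.unpack("<iiQQ", fid.read(24)): two int32 ids followed by two
    uint64 dimensions, since 4 + 4 + 8 + 8 = 24 bytes.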
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/data/iphone_dataset.py b/som_out/bear/code/2024-10-26-005936/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.scene_norm_dict = 
scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
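# A minimal sketch of the depth convention used by load_depth below: the
# non-lidar .npy files appear to store inverse depth (disparity), so values
# are clamped away from zero before inversion to keep depth finite.
#
#     disp = np.clip(np.load(path), 1e-3, None)  # path is hypothetical
#     depth = 1.0 / disp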
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
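# A sketch of the normalization computed below, assuming foreground tracks of
# shape [N, T, 3]: recenter at the mean track position, set the scale to half
# the largest per-axis extent between the 5% and 95% quantiles, and rotate the
# mean camera up-axis (the negated y-row of the w2c matrices) onto world +z
# with an axis-angle rotation,
#
#     rotvec = normalize(up x z) * arccos(up . z),
#
# so that cameras end up roughly upright in the normalized frame.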
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
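# Assumed on-disk layout of the 2D track files (TAPIR-style), inferred from
# the parsing below: each {query}_{target}.npy is an (N, 4) array with
#     [:, 0:2]  pixel (x, y) in the target frame,
#     [:, 2]    occlusion logit,
#     [:, 3]    expected-distance (uncertainty) logit,
# which parse_tapir_track_info later turns into visible / invisible /
# confidence masks.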
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
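# Color lookup sketch: F.grid_sample expects coordinates in [-1, 1], and
# normalize_coords (flow3d.data.utils) maps pixels there via
#     x_n = x / (W - 1) * 2 - 1,   y_n = y / (H - 1) * 2 - 1,
# so the call below bilinearly samples each track's RGB at its query-frame
# position, with padding_mode="border" guarding out-of-frame points.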
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
- self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/data/utils.py b/som_out/bear/code/2024-10-26-005936/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. - - Args: - occlusions: [-inf, inf], np.float32 - expected_dist:, [-inf, inf], np.float32 - - Returns: - visibles: bool - """ - - def sigmoid(x): - if x.dtype == np.ndarray: - return 1 / (1 + np.exp(-x)) - else: - return torch.sigmoid(x) - - visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5 - return visibles - - -def parse_tapir_track_info(occlusions, expected_dist): - """ - return: - valid_visible: mask of visible & confident points - valid_invisible: mask of invisible & confident points - confidence: clamped confidence scores (all < 0.5 -> 0) - """ - visiblility = 1 - F.sigmoid(occlusions) - confidence = 1 - F.sigmoid(expected_dist) - valid_visible = visiblility * confidence > 0.5 - valid_invisible = (1 - visiblility) * confidence > 0.5 - # set all confidence < 0.5 to 0 - confidence = confidence * (valid_visible | valid_invisible).float() - return valid_visible, valid_invisible, confidence - - -def get_tracks_3d_for_query_frame( - query_index: int, - query_img: torch.Tensor, - tracks_2d: torch.Tensor, - depths: torch.Tensor, - masks: torch.Tensor, - inv_Ks: torch.Tensor, - c2ws: torch.Tensor, -): - """ - :param query_index (int) - :param query_img [H, W, 3] - :param tracks_2d [N, T, 4] - :param depths [T, H, W] - :param masks [T, H, W] - :param inv_Ks [T, 3, 3] - :param c2ws [T, 4, 4] - returns ( - tracks_3d [N, T, 3] - track_colors [N, 3] - visibles [N, T] - invisibles [N, T] - confidences [N, T] - ) - """ - T, H, W = depths.shape - query_img = query_img[None].permute(0, 3, 1, 2) # (1, 3, H, W) - tracks_2d = tracks_2d.swapaxes(0, 1) # (T, N, 4) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - # (T, N), (T, N), (T, N) - visibles, 
invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - track_depths = F.grid_sample( - depths[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths[..., None] - ) - tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[ - ..., :3 - ] - # Filter out out-of-mask tracks. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - is_in_masks = ( - F.grid_sample( - masks[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - )[:, 0, 0] - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - - # valid if in the fg mask at least 40% of the time - # in_mask_counts = is_in_masks.sum(0) - # t = 0.25 - # thresh = min(t * T, in_mask_counts.float().quantile(t).item()) - # valid = in_mask_counts > thresh - valid = is_in_masks[query_index] - # valid if visible 5% of the time - visible_counts = visibles.sum(0) - valid = valid & ( - visible_counts - >= min( - int(0.05 * T), - visible_counts.float().quantile(0.1).item(), - ) - ) - - # Get track's color from the query frame. - # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. 
Got: {image.shape}")
-
-    padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size))
-
-    # prepare kernel
-    kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image)
-    b, c, h, w = image.shape
-
-    # map the local window to single vector
-    features: torch.Tensor = F.conv2d(
-        image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1
-    )
-    masks: torch.Tensor = F.conv2d(
-        mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1
-    )
-    features = features.view(b, c, -1, h, w).permute(
-        0, 1, 3, 4, 2
-    )  # BxCxHxWx(K_h * K_w)
-    min_value, max_value = features.min(), features.max()
-    masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2)  # BxCxHxWx(K_h * K_w)
-    index_invalid = (1 - masks).nonzero(as_tuple=True)
-    index_b, index_c, index_h, index_w, index_k = index_invalid
-    features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = (
-        min_value
-    )
-    features[
-        (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2])
-    ] = max_value
-    # compute the median along the feature axis
-    median: torch.Tensor = torch.median(features, dim=-1)[0]
-
-    return median
-
-
-def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]:
-    r"""Utility function that computes zero padding tuple."""
-    computed: List[int] = [(k - 1) // 2 for k in kernel_size]
-    return computed[0], computed[1]
-
-
-def get_binary_kernel2d(
-    window_size: tuple[int, int] | int,
-    *,
-    device: Optional[torch.device] = None,
-    dtype: torch.dtype = torch.float32,
-) -> torch.Tensor:
-    """
-    from kornia
-    Create a binary kernel to extract the patches.
-    If the window size is HxW, this will create a (H*W)x1xHxW kernel.
-    """
-    ky, kx = _unpack_2d_ks(window_size)
-
-    window_range = kx * ky
-
-    kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype)
-    idx = torch.arange(window_range, device=device)
-    kernel[idx, idx] += 1.0
-    return kernel.view(window_range, 1, ky, kx)
-
-
-def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]:
-    if isinstance(kernel_size, int):
-        ky = kx = kernel_size
-    else:
-        assert len(kernel_size) == 2, "2D Kernel size should have a length of 2."
-        ky, kx = kernel_size
-
-    ky = int(ky)
-    kx = int(kx)
-
-    return (ky, kx)
-
-
-## Functions from GaussianShader.
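# Data-flow sketch for the helpers below: depth2point_world lifts every
# pixel of a depth map to a world-space point by inverting the
# NDC -> camera -> world chain, and depth_pcd2normal then estimates a
# per-pixel normal from central differences of neighboring points,
#
#     n(y, x) = normalize((p[y, x+1] - p[y, x-1]) x (p[y-1, x] - p[y+1, x])),
#
# while normal_from_depth_image simply composes the two steps.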
-def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] - ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/init_utils.py b/som_out/bear/code/2024-10-26-005936/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from 
flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes -from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - -def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. - opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - -def init_bg( - points: StaticObservations, -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkdg_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkdg_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals.
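The orientation initialization that follows aligns each background Gaussian's local +z axis with its point normal: the rotation vector is axis times angle, with axis = normalize(z x n) and angle = arccos(z . n), converted to a quaternion (roma returns xyzw order, hence the final .roll to wxyz). A small standalone check of that construction, ignoring the degenerate n close to +/-z case:

```python
import torch
import torch.nn.functional as F
import roma

z = torch.tensor([[0.0, 0.0, 1.0]])
n = F.normalize(torch.randn(5, 3), dim=-1)
axis = F.normalize(torch.cross(z.expand_as(n), n, dim=-1), dim=-1)
angle = (z * n).sum(-1, keepdim=True).clamp(-1.0, 1.0).acos()
q_xyzw = roma.rotvec_to_unitquat(axis * angle)
R = roma.unitquat_to_rotmat(q_xyzw)  # (5, 3, 3)
# Each rotation should map the canonical +z axis onto its normal.
assert torch.allclose(R @ z.T.expand(5, -1, -1), n[..., None], atol=1e-5)
```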
- local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t - 
procrustes_weights = ( - weights[cano_t] - * weights[cur_t] - * (confidences[cano_t] + confidences[cur_t]) - / 2 - ) - if procrustes_weights.sum() < min_mean_weight * num_frames: - init_rots[n, cur_t] = init_rots[n, prev_t] - init_ts[n, cur_t] = init_ts[n, prev_t] - cluster_skip_ts.append(cur_t) - else: - se3, (err, err_before) = solve_procrustes( - cluster[cano_t], - cluster[cur_t], - weights=procrustes_weights, - enforce_se3=True, - rot_type=rot_type, - ) - init_rot, init_t, _ = se3 - assert init_rot.shape[-1] == rot_dim - # double cover - if rot_type == "quat" and torch.linalg.norm( - init_rot - init_rots[n][prev_t] - ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]): - init_rot = -init_rot - init_rots[n, cur_t] = init_rot - init_ts[n, cur_t] = init_t - if np.isnan(err): - print(f"{cur_t=} {err=}") - print(f"{procrustes_weights.isnan().sum()=}") - if np.isnan(err_before): - print(f"{cur_t=} {err_before=}") - print(f"{procrustes_weights.isnan().sum()=}") - errs_after[n, cur_t] = err - errs_before[n, cur_t] = err_before - prev_t = cur_t - skipped_ts[cluster_id.item()] = cluster_skip_ts - - guru.info(f"{skipped_ts=}") - guru.info( - "procrustes init median error: {:.5f} => {:.5f}".format( - np.median(errs_before[errs_before > 0]), - np.median(errs_after[errs_after > 0]), - ) - ) - guru.info( - "procrustes init mean error: {:.5f} => {:.5f}".format( - np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0]) - ) - ) - guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}") - - if vis: - server = get_server(port) - center_idcs = torch.argmin(dists2centers, dim=0) - print(f"{dists2centers.shape=} {center_idcs.shape=}") - vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs]) - vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks") - import ipdb - - ipdb.set_trace() - - bases = MotionBases(init_rots, init_ts) - return bases, motion_coefs, tracks_3d - - -def run_initial_optim( - fg: GaussianParams, - bases: MotionBases, - tracks_3d: TrackObservations, - Ks: torch.Tensor, - w2cs: torch.Tensor, - num_iters: int = 1000, - use_depth_range_loss: bool = False, -): - """ - :param motion_rots: [num_bases, num_frames, 4|6] - :param motion_transls: [num_bases, num_frames, 3] - :param motion_coefs: [num_bases, num_frames] - :param means: [num_gaussians, 3] - """ - optimizer = torch.optim.Adam( - [ - {"params": bases.params["rots"], "lr": 1e-2}, - {"params": bases.params["transls"], "lr": 3e-2}, - {"params": fg.params["motion_coefs"], "lr": 1e-2}, - {"params": fg.params["means"], "lr": 1e-3}, - ], - ) - scheduler = torch.optim.lr_scheduler.ExponentialLR( - optimizer, gamma=0.1 ** (1 / num_iters) - ) - G = fg.params.means.shape[0] - num_frames = bases.num_frames - device = bases.params["rots"].device - - w_smooth_func = lambda i, min_v, max_v, th: ( - min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v - ) - - gt_2d, gt_depth = project_2d_tracks( - tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - # (G, T, 2) - gt_2d = gt_2d.swapaxes(0, 1) - # (G, T) - gt_depth = gt_depth.swapaxes(0, 1) - - ts = torch.arange(0, num_frames, device=device) - ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1)) # i (3B,) - - pbar = tqdm(range(0, num_iters)) - for i in pbar: - coefs = fg.get_coefs() - transfms = bases.compute_transforms(ts, coefs) - positions = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0,
1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. - w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = 
init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / 
(cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/loss_utils.py b/som_out/bear/code/2024-10-26-005936/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return 
torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - 
# acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/metrics.py b/som_out/bear/code/2024-10-26-005936/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/metrics.py +++ /dev/null @@ -1,313 +0,0 @@ -from typing import Literal - -import numpy as np -import torch -import torch.nn.functional as F -from torchmetrics.functional.image.lpips import _NoTrainLpips -from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure -from torchmetrics.metric import Metric -from torchmetrics.utilities import dim_zero_cat -from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE - - -def compute_psnr( - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, -) -> float: - """ - Args: - preds (torch.Tensor): (..., 3) predicted images in [0, 1]. - targets (torch.Tensor): (..., 3) target images in [0, 1]. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - - Returns: - psnr (float): Peak signal-to-noise ratio. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - return ( - -10.0 - * torch.log( - F.mse_loss( - preds * masks[..., None], - targets * masks[..., None], - reduction="sum", - ) - / masks.sum().clamp(min=1.0) - / 3.0 - ) - / np.log(10.0) - ).item() - - -def compute_pose_errors( - preds: torch.Tensor, targets: torch.Tensor -) -> tuple[float, float, float]: - """ - Args: - preds: (N, 4, 4) predicted camera poses. - targets: (N, 4, 4) target camera poses. - - Returns: - ate (float): Absolute trajectory error. - rpe_t (float): Relative pose error in translation. - rpe_r (float): Relative pose error in rotation (degree). - """ - # Compute ATE. - ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item() - # Compute RPE_t and RPE_r. - # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r. - # torch has numerical issues for acos when the value is close to 1.0, i.e. - # RPE_r is supposed to be very small, and will result in artificially large - # error. 
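The note above is easy to reproduce: near the identity, the angle arccos((trace(R) - 1) / 2) is ill-conditioned, because the derivative of arccos diverges as its argument approaches 1, so float32 rounding of the trace translates into a large angular error. An illustrative example with assumed values:

```python
import numpy as np

theta = 1e-3  # true relative rotation angle, in radians
x = (1.0 + 2.0 * np.cos(theta) - 1.0) / 2.0  # (trace - 1) / 2 for a z-axis rotation
print(np.arccos(np.float32(x)))  # float32: percent-level error on theta
print(np.arccos(np.float64(x)))  # float64: ~1.0e-3, as expected
```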
- preds = preds.detach().cpu().numpy() - targets = targets.detach().cpu().numpy() - pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:] - target_rels = np.linalg.inv(targets[:-1]) @ targets[1:] - error_rels = np.linalg.inv(target_rels) @ pred_rels - traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1) - rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item() - rpe_r = ( - np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item() - / np.pi - * 180.0 - ) - return ate, rpe_t, rpe_r - - -class mPSNR(PeakSignalNoiseRatio): - sum_squared_error: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__(self, **kwargs) -> None: - super().__init__( - data_range=1.0, - base=10.0, - dim=None, - reduction="elementwise_mean", - **kwargs, - ) - self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (..., 3) float32 predicted images. - targets (torch.Tensor): (..., 3) float32 target images. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - self.sum_squared_error.append( - torch.sum(torch.pow((preds - targets) * masks[..., None], 2)) - ) - self.total.append(masks.sum().to(torch.int64) * 3) - - def compute(self) -> torch.Tensor: - """Compute peak signal-to-noise ratio over state.""" - sum_squared_error = dim_zero_cat(self.sum_squared_error) - total = dim_zero_cat(self.total) - return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0) - - -class mSSIM(StructuralSimilarityIndexMeasure): - similarity: list - - def __init__(self, **kwargs) -> None: - super().__init__( - reduction=None, - data_range=1.0, - return_full_image=False, - **kwargs, - ) - assert isinstance(self.sigma, float) - - def __len__(self) -> int: - return sum([s.shape[0] for s in self.similarity]) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional binary masks where - the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - - # Construct a 1D Gaussian blur filter. - assert isinstance(self.kernel_size, int) - hw = self.kernel_size // 2 - shift = (2 * hw - self.kernel_size + 1) / 2 - assert isinstance(self.sigma, float) - f_i = ( - (torch.arange(self.kernel_size, device=preds.device) - hw + shift) - / self.sigma - ) ** 2 - filt = torch.exp(-0.5 * f_i) - filt /= torch.sum(filt) - - # Blur in x and y (faster than the 2D convolution). - def convolve2d(z, m, f): - # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf).
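The convolve2d helper below blurs z * m and the mask m with the same filter and then divides, so the SSIM statistics are averaged over valid pixels only and masked-out pixels do not bias the result. A 1-D toy version of that renormalization (deliberately simplified from the grouped 2-D convolution used here):

```python
import torch
import torch.nn.functional as F

z = torch.tensor([[1.0, 5.0, 3.0, 2.0]])
m = torch.tensor([[1.0, 0.0, 1.0, 1.0]])  # second sample is invalid
f = torch.ones(1, 1, 3) / 3.0             # box filter, for simplicity
num = F.conv1d((z * m)[None], f, padding=1)[0]  # blurred masked signal
den = F.conv1d(m[None], f, padding=1)[0]        # blurred mask
# Prints tensor([[1.0, 2.0, 2.5, 2.5]]): averages over valid neighbors only;
# the invalid value 5.0 never contributes.
print(torch.where(den > 0, num / den, torch.zeros_like(num)))
```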
- z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. - # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. 
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. - """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/params.py b/som_out/bear/code/2024-10-26-005936/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if 
f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] 
== 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/renderer.py b/som_out/bear/code/2024-10-26-005936/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. - work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/scene_model.py b/som_out/bear/code/2024-10-26-005936/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def 
__init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = 
GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). - target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = 
colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. - self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/tensor_dataclass.py b/som_out/bear/code/2024-10-26-005936/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
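The recursion in _map_impl above applies one function uniformly to every tensor field (and through nested lists, tuples, and dicts), which is what keeps the observation fields masked, indexed, and moved together. Hypothetical usage with an illustrative two-field dataclass (not one of the classes defined here):

```python
from dataclasses import dataclass

import torch


@dataclass
class Points(TensorDataclass):  # hypothetical example subclass
    xyz: torch.Tensor
    colors: torch.Tensor

pts = Points(xyz=torch.randn(8, 3), colors=torch.rand(8, 3))
subset = pts[torch.tensor([0, 2, 4])]  # __getitem__ -> map(lambda x: x[key])
moved = subset.to("cpu")               # .to        -> map(lambda x: x.to(device))
assert subset.xyz.shape == (3, 3) and moved.colors.shape == (3, 3)
```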
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/trainer.py b/som_out/bear/code/2024-10-26-005936/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
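- # Poses of all Gaussians at every target timestep, flattened across
- # the batch; split back into per-sample chunks of N just below.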
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
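- # Scatter the query pixels into a dense (B, H, W) mask so the per-pixel
- # track predictions can be gathered at exactly the query locations.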
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
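- # All loss tensors are reduced to Python scalars via .item() so the
- # logger keeps no reference to the computation graph.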
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
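- # Returns False when the model has not been rendered in training mode
- # yet, i.e. no screen-space positions/radii were cached this step.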
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/trajectories.py b/som_out/bear/code/2024-10-26-005936/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1) - y = torch.cross(z, x, dim=-1) # (3) - avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center) - avg_w2c = torch.linalg.inv(avg_c2w) - return avg_w2c - - -def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor: - """Triangulate a set of rays to find a single lookat point. - - Args: - origins (torch.Tensor): A (N, 3) array of ray origins. - viewdirs (torch.Tensor): A (N, 3) array of ray view directions. - - Returns: - torch.Tensor: A (3,) lookat point. - """ - - viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1) - eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None] - # Calculate projection matrix I - rr^T - I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :]) - # Compute sum of projections - sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3) - # Solve for the intersection point using least squares - lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0] - # Check NaNs. - assert not torch.any(torch.isnan(lookat)) - return lookat - - -def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor): - """ - Args: - positions: (N, 3) tensor of camera positions - lookat: (3,) tensor of lookat point - up: (3,) tensor of up vector - - Returns: - w2cs: (N, 3, 3) tensor of world to camera rotation matrices - """ - forward_vectors = F.normalize(lookat - positions, dim=-1) - right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1) - down_vectors = F.normalize( - torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1 - ) - Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1) - w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions)) - return w2cs - - -def get_arc_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_position = torch.linalg.inv(ref_w2c)[:3, 3] - thetas = ( - torch.sin( - torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[ - :-1 - ] - ) - * (degree / 2.0) - / 180.0 - * torch.pi - ) - positions = torch.einsum( - "nij,j->ni", - roma.rotvec_to_rotmat(thetas[:, None] * up[None]), - ref_position - lookat, - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_lemniscate_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi) - # Lemniscate curve in camera space. Starting at the origin. - thetas = ( - torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1] - + torch.pi / 2 - ) - positions = torch.stack( - [ - a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2), - a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2), - torch.zeros(num_frames, device=ref_w2c.device), - ], - dim=-1, - ) - # Transform to world space. 
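- # Homogeneous pad + the 3x4 slice of ref_c2w maps the camera-frame
- # curve into world coordinates.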
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/transforms.py b/som_out/bear/code/2024-10-26-005936/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transfm to be SE3. - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - q (torch.Tensor): (4,), rotation component in quaternion of WXYZ - format. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (torch.Tensor): (), average L2 distance after alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error. 
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/validator.py b/som_out/bear/code/2024-10-26-005936/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
- t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (3, 3). - img = batch["imgs"][0] - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "motion_coefs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/vis/__init__.py b/som_out/bear/code/2024-10-26-005936/flow3d/vis/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/vis/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc deleted file mode 100644 index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/vis/__pycache__/render_panel.cpython-310.pyc deleted file mode 100644 index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/vis/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/bear/code/2024-10-26-005936/flow3d/vis/__pycache__/viewer.cpython-310.pyc deleted file mode 100644 index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-005936/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/vis/playback_panel.py b/som_out/bear/code/2024-10-26-005936/flow3d/vis/playback_panel.py deleted file mode 100644 index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/vis/playback_panel.py +++ /dev/null @@ -1,68 +0,0 @@ -import threading -import time - -import viser - - -def add_gui_playback_group( - server: viser.ViserServer, - num_frames: int, - min_fps: float = 1.0, - max_fps: float = 60.0, - fps_step: float = 0.1, - initial_fps: float = 10.0, -): - 
gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/vis/render_panel.py b/som_out/bear/code/2024-10-26-005936/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
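- # Camera-path editor panel (per the Nerfstudio header above): keyframed
- # camera trajectories interpolated with Kochanek-Bartels splines.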
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
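- # A fresh keyframe gets a monotonically increasing index; a replacement
- # reuses the caller-supplied index so existing GUI handles stay stable.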
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
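- # Ten interpolation steps along SE(3) at ~30 Hz give a short fly-to
- # animation rather than teleporting the viewport.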
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
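- # PCHIP interpolation is monotone, so the clip only guards against
- # tiny endpoint overshoot.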
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
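The monotone reparameterization in `spline_t_from_t_sec` is easy to sanity-check in isolation. A minimal sketch, with hypothetical transition durations:

```python
import numpy as np
import scipy.interpolate

# Cumulative transition times for four keyframes (hypothetical, in seconds).
transition_times_cumsum = np.array([0.0, 2.0, 3.5, 6.0])
spline_indices = np.arange(transition_times_cumsum.shape[0])

# PCHIP is shape-preserving, so seconds map monotonically to fractional
# keyframe indices with no overshoot between knots.
interpolator = scipy.interpolate.PchipInterpolator(
    x=transition_times_cumsum, y=spline_indices
)
t_sec = np.array([0.0, 1.0, 2.0, 4.0, 6.0])
print(np.clip(interpolator(t_sec), 0, spline_indices[-1]))  # increasing, in [0, 3]
```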
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
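A minimal sketch of the pose construction used by the keyframe handler above, assuming `viser.transforms` is imported as `tf` and using a toy camera state:

```python
import numpy as np
import viser.transforms as tf

# A camera orientation as a wxyz quaternion plus a position, as viser reports them.
wxyz = np.array([1.0, 0.0, 0.0, 0.0])  # identity rotation
position = np.array([0.0, 0.0, 2.0])

pose = tf.SE3.from_rotation_and_translation(tf.SO3(wxyz), position)
print(pose.as_matrix())  # 4x4 camera-to-world matrix, translation in last column
```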
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
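`_make_transform_controls_callback` above is a separate helper so that each keyframe's controls capture their own keyframe rather than the loop variable. A minimal sketch of the late-binding pitfall this avoids:

```python
def make_callbacks_buggy(items):
    # All lambdas share one `item` variable, so every callback sees the last value.
    return [lambda: print(item) for item in items]

def make_callbacks_correct(items):
    def bind(one_item):
        # `one_item` is bound per call, giving each callback its own value,
        # mirroring what _make_transform_controls_callback does per keyframe.
        return lambda: print(one_item)
    return [bind(item) for item in items]

for cb in make_callbacks_buggy(["a", "b"]):
    cb()  # prints "b" twice
for cb in make_callbacks_correct(["a", "b"]):
    cb()  # prints "a", then "b"
```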
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
-        Returns current camera pose + FOV if available."""
-
-        if preview_frame_slider is None:
-            return
-        maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad(
-            preview_frame_slider.value / get_max_frame_index()
-        )
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-        render_tab_state.preview_fov = fov_rad
-        render_tab_state.preview_aspect = camera_path.get_aspect()
-        render_tab_state.preview_camera_type = camera_type.value
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-        return pose, fov_rad, time
-
-    def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]:
-        """Helper for creating the current frame # slider. This is removed and
-        re-added anytime the `max` value changes."""
-
-        with playback_folder:
-            preview_frame_slider = server.gui.add_slider(
-                "Preview frame",
-                min=0,
-                max=get_max_frame_index(),
-                step=1,
-                initial_value=0,
-                # Place right after the pause button.
-                order=preview_render_stop_button.order + 0.01,
-                disabled=get_max_frame_index() == 1,
-            )
-            play_button.disabled = preview_frame_slider.disabled
-            preview_render_button.disabled = preview_frame_slider.disabled
-
-        @preview_frame_slider.on_update
-        def _(_) -> None:
-            nonlocal preview_camera_handle
-            maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-            if maybe_pose_and_fov_rad_and_time is None:
-                return
-            pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-
-            preview_camera_handle = server.scene.add_camera_frustum(
-                "/preview_camera",
-                fov=fov_rad,
-                aspect=resolution.value[0] / resolution.value[1],
-                scale=0.35,
-                wxyz=pose.rotation().wxyz,
-                position=pose.translation(),
-                color=(10, 200, 30),
-            )
-            if render_tab_state.preview_render:
-                for client in server.get_clients().values():
-                    client.camera.wxyz = pose.rotation().wxyz
-                    client.camera.position = pose.translation()
-            if gui_timestep_handle is not None:
-                gui_timestep_handle.value = int(time)
-
-        return preview_frame_slider
-
-    # We back up the camera poses when a preview starts, and restore them when it stops.
-    camera_pose_backup_from_id: Dict[int, tuple] = {}
-
-    @preview_render_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = True
-        preview_render_button.visible = False
-        preview_render_stop_button.visible = True
-
-        maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov, time = maybe_pose_and_fov_rad_and_time
-        del fov
-
-        # Hide all scene nodes when we're previewing the render.
-        server.scene.set_global_visibility(False)
-
-        # Back up and then set camera poses.
-        for client in server.get_clients().values():
-            camera_pose_backup_from_id[client.client_id] = (
-                client.camera.position,
-                client.camera.look_at,
-                client.camera.up_direction,
-            )
-            client.camera.wxyz = pose.rotation().wxyz
-            client.camera.position = pose.translation()
-            if gui_timestep_handle is not None:
-                gui_timestep_handle.value = int(time)
-
-    @preview_render_stop_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = False
-        preview_render_button.visible = True
-        preview_render_stop_button.visible = False
-
-        # Revert camera poses.
-        for client in server.get_clients().values():
-            if client.client_id not in camera_pose_backup_from_id:
-                continue
-            cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop(
-                client.client_id
-            )
-            client.camera.position = cam_position
-            client.camera.look_at = cam_look_at
-            client.camera.up_direction = cam_up
-            client.flush()
-
-        # Un-hide scene nodes.
-        server.scene.set_global_visibility(True)
-
-    preview_frame_slider = add_preview_frame_slider()
-
-    # Update the # of frames.
-    @duration_number.on_update
-    @framerate_number.on_update
-    def _(_) -> None:
-        remove_preview_camera()  # Will be re-added when slider is updated.
-
-        nonlocal preview_frame_slider
-        old = preview_frame_slider
-        assert old is not None
-
-        preview_frame_slider = add_preview_frame_slider()
-        if preview_frame_slider is not None:
-            old.remove()
-        else:
-            preview_frame_slider = old
-
-        camera_path.framerate = framerate_number.value
-        camera_path.update_spline()
-
-    # Play the camera trajectory when the play button is pressed.
-    @play_button.on_click
-    def _(_) -> None:
-        play_button.visible = False
-        pause_button.visible = True
-
-        def play() -> None:
-            while not play_button.visible:
-                max_frame = int(framerate_number.value * duration_number.value)
-                if max_frame > 0:
-                    assert preview_frame_slider is not None
-                    preview_frame_slider.value = (
-                        preview_frame_slider.value + 1
-                    ) % max_frame
-                time.sleep(1.0 / framerate_number.value)
-
-        threading.Thread(target=play).start()
-
-    # Pause the camera trajectory when the pause button is pressed.
-    @pause_button.on_click
-    def _(_) -> None:
-        play_button.visible = True
-        pause_button.visible = False
-
-    # Add a button for loading an existing camera path.
-    load_camera_path_button = server.gui.add_button(
-        "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path."
-    )
-
-    @load_camera_path_button.on_click
-    def _(event: viser.GuiEvent) -> None:
-        assert event.client is not None
-        camera_path_dir = datapath.parent
-        camera_path_dir.mkdir(parents=True, exist_ok=True)
-        preexisting_camera_paths = list(camera_path_dir.glob("*.json"))
-        preexisting_camera_filenames = [p.name for p in preexisting_camera_paths]
-
-        with event.client.gui.add_modal("Load Path") as modal:
-            if len(preexisting_camera_filenames) == 0:
-                event.client.gui.add_markdown("No existing paths found")
-            else:
-                event.client.gui.add_markdown("Select existing camera path:")
-                camera_path_dropdown = event.client.gui.add_dropdown(
-                    label="Camera Path",
-                    options=[str(p) for p in preexisting_camera_filenames],
-                    initial_value=str(preexisting_camera_filenames[0]),
-                )
-                load_button = event.client.gui.add_button("Load")
-
-                @load_button.on_click
-                def _(_) -> None:
-                    # Load the JSON file.
-                    json_path = datapath / camera_path_dropdown.value
-                    with open(json_path, "r") as f:
-                        json_data = json.load(f)
-
-                    keyframes = json_data["keyframes"]
-                    camera_path.reset()
-                    for i in range(len(keyframes)):
-                        frame = keyframes[i]
-                        pose = tf.SE3.from_matrix(
-                            np.array(frame["matrix"]).reshape(4, 4)
-                        )
-                        # Apply a 180-degree rotation about the x axis.
-                        pose = tf.SE3.from_rotation_and_translation(
-                            pose.rotation() @ tf.SO3.from_x_radians(np.pi),
-                            pose.translation(),
-                        )
-
-                        camera_path.add_camera(
-                            Keyframe(
-                                frame["time"],
-                                position=pose.translation(),
-                                wxyz=pose.rotation().wxyz,
-                                # There are floating point conversions between degrees and radians,
-                                # so the fov and default_fov values will not match exactly.
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/vis/utils.py b/som_out/bear/code/2024-10-26-005936/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, occ, radius=radius) 
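The projection in `project_2d_tracks` above reduces to two `einsum` contractions followed by a perspective divide. A minimal sketch on toy tensors (one frame, two points, hypothetical intrinsics):

```python
import torch
import torch.nn.functional as F

# (T=1, N=2, 3) world-space points, 4 m in front of a camera at the origin.
tracks_3d_w = torch.tensor([[[0.0, 0.0, 4.0], [1.0, 0.0, 4.0]]])
# Hypothetical intrinsics: focal 500 px, principal point (320, 240).
Ks = torch.tensor([[[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]]])
T_cw = torch.eye(4)[None]  # world-to-camera = identity

# World -> camera (homogeneous pad), then camera -> image plane.
tracks_3d_c = torch.einsum(
    "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1)
)[..., :3]
tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c)
tracks_2d = tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5)
print(tracks_2d)  # tensor([[[320., 240.], [445., 240.]]])
```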
- out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/bear/code/2024-10-26-005936/flow3d/vis/viewer.py b/som_out/bear/code/2024-10-26-005936/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/bear/code/2024-10-26-005936/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/bear/code/2024-10-26-005936/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/bear/code/2024-10-26-005936/scripts/evaluate_iphone.py b/som_out/bear/code/2024-10-26-005936/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-005936/scripts/evaluate_iphone.py +++ /dev/null @@ 
-1,447 +0,0 @@ -import argparse -import json -import os.path as osp -from glob import glob -from itertools import product - -import cv2 -import imageio.v3 as iio -import numpy as np -import roma -import torch -from tqdm import tqdm - -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.metrics import mLPIPS, mPSNR, mSSIM -from flow3d.transforms import rt_to_mat4, solve_procrustes - -parser = argparse.ArgumentParser() -parser.add_argument( - "--data_dir", - type=str, - help="Path to the data directory that contains all the sequences.", -) -parser.add_argument( - "--result_dir", - type=str, - help="Path to the result directory that contains the results." - "for batch evaluation, result_dir should contain subdirectories for each sequence. (result_dir/seq_name/results)" - "for single sequence evaluation, result_dir should contain results directly (result_dir/results)", -) -parser.add_argument( - "--seq_names", - type=str, - nargs="+", - default=[ - "apple", - "backpack", - "block", - "creeper", - "handwavy", - "haru-sit", - "mochi-high-five", - "paper-windmill", - "pillow", - "spin", - "sriracha-tree", - "teddy", - ], - help="Sequence names to evaluate.", -) -args = parser.parse_args() - - -def load_data_dict(data_dir, train_names, val_names): - val_imgs = np.array( - [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names] - ) - val_covisibles = np.array( - [ - iio.imread( - osp.join( - data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png" - ) - ) - for name in tqdm(val_names, desc="Loading val covisibles") - ] - ) - train_depths = np.array( - [ - np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0] - for name in train_names - ] - ) - train_Ks, train_w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [name + ".png" for name in train_names], - ) - train_Ks = train_Ks[:, :3, :3] - scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item() - train_c2ws = np.linalg.inv(train_w2cs) - train_c2ws[:, :3, -1] *= scale - train_w2cs = np.linalg.inv(train_c2ws) - keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json"))) - keypoints_2d = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints_2d.append(json.load(f)) - keypoints_2d = np.array(keypoints_2d) - keypoints_2d[..., :2] *= 2.0 - time_ids = np.array( - [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths] - ) - time_pairs = np.array(list(product(time_ids, repeat=2))) - index_pairs = np.array(list(product(range(len(time_ids)), repeat=2))) - keypoints_3d = [] - for i, kps_2d in zip(time_ids, keypoints_2d): - K = train_Ks[i] - w2c = train_w2cs[i] - depth = train_depths[i] - is_kp_visible = kps_2d[:, 2] == 1 - is_depth_valid = ( - cv2.remap( - (depth != 0).astype(np.float32), - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - == 1 - ) - kp_depths = cv2.remap( - depth, # type: ignore - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - kps_3d = ( - np.einsum( - "ij,pj->pi", - np.linalg.inv(K), - np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1), - ) - * kp_depths[:, None] - ) - kps_3d = np.einsum( - "ij,pj->pi", - np.linalg.inv(w2c)[:3], - np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1), - ) - kps_3d = np.concatenate( - [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1 - ) - 
kps_3d[kps_3d[:, -1] != 1] = 0.0
-        keypoints_3d.append(kps_3d)
-    keypoints_3d = np.array(keypoints_3d)
-    return {
-        "val_imgs": val_imgs,
-        "val_covisibles": val_covisibles,
-        "train_depths": train_depths,
-        "train_Ks": train_Ks,
-        "train_w2cs": train_w2cs,
-        "keypoints_2d": keypoints_2d,
-        "keypoints_3d": keypoints_3d,
-        "time_ids": time_ids,
-        "time_pairs": time_pairs,
-        "index_pairs": index_pairs,
-    }
-
-
-def load_result_dict(result_dir, val_names):
-    try:
-        pred_val_imgs = np.array(
-            [
-                iio.imread(osp.join(result_dir, "rgb", f"{name}.png"))
-                for name in val_names
-            ]
-        )
-    except Exception:
-        pred_val_imgs = None
-    try:
-        keypoints_dict = np.load(
-            osp.join(result_dir, "keypoints.npz"), allow_pickle=True
-        )
-        if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict:
-            keypoints_dict = keypoints_dict["arr_0"].item()
-        pred_keypoint_Ks = keypoints_dict["Ks"]
-        pred_keypoint_w2cs = keypoints_dict["w2cs"]
-        pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"]
-        pred_train_depths = keypoints_dict["pred_train_depths"]
-    except Exception:
-        print(
-            "No keypoints.npz found; make sure this is because the method itself cannot produce keypoints."
-        )
-        keypoints_dict = {}
-        pred_keypoint_Ks = None
-        pred_keypoint_w2cs = None
-        pred_keypoints_3d = None
-        pred_train_depths = None
-
-    if "visibilities" in keypoints_dict:
-        pred_visibilities = keypoints_dict["visibilities"]
-    else:
-        pred_visibilities = None
-
-    return {
-        "pred_val_imgs": pred_val_imgs,
-        "pred_train_depths": pred_train_depths,
-        "pred_keypoint_Ks": pred_keypoint_Ks,
-        "pred_keypoint_w2cs": pred_keypoint_w2cs,
-        "pred_keypoints_3d": pred_keypoints_3d,
-        "pred_visibilities": pred_visibilities,
-    }
-
-
-def evaluate_3d_tracking(data_dict, result_dict):
-    train_Ks = data_dict["train_Ks"]
-    train_w2cs = data_dict["train_w2cs"]
-    keypoints_3d = data_dict["keypoints_3d"]
-    time_ids = data_dict["time_ids"]
-    time_pairs = data_dict["time_pairs"]
-    index_pairs = data_dict["index_pairs"]
-    pred_keypoint_Ks = result_dict["pred_keypoint_Ks"]
-    pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"]
-    pred_keypoints_3d = result_dict["pred_keypoints_3d"]
-    if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks):
-        print("Inconsistent camera intrinsics.")
-        print(train_Ks[time_ids][0], pred_keypoint_Ks[0])
-    keypoint_w2cs = train_w2cs[time_ids]
-    q, t, s = solve_procrustes(
-        torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to(
-            torch.float32
-        ),
-        torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32),
-    )[0]
-    R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1))
-    pred_keypoints_3d = np.einsum(
-        "ij,...j->...i",
-        rt_to_mat4(R, t, s).numpy().astype(np.float64),
-        np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1),
-    )
-    pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:]
-    # Compute 3D tracking metrics.
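The metric code that follows aggregates per-pair end-point errors into EPE and PCK. A minimal sketch of those two metrics on toy keypoints:

```python
import numpy as np

# Two ground-truth 3D keypoints and slightly perturbed predictions (meters).
target = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
pred = np.array([[0.02, 0.0, 0.0], [1.0, 0.12, 0.0]])

# End-point error: Euclidean distance per point; PCK@tau: fraction within tau.
epes = np.linalg.norm(target - pred, axis=-1)
print(epes.mean())            # EPE        -> 0.07
print((epes < 0.10).mean())   # PCK @ 10cm -> 0.5
print((epes < 0.05).mean())   # PCK @ 5cm  -> 0.5
```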
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
None].repeat(num_pts, axis=-1)
-    evaluation_points = one_hot_eye == 0
-    for i in range(num_frames):
-        evaluation_points[i, :, ~visibilities[i]] = False
-    occ_acc = np.sum(
-        np.equal(pred_visibilities, target_visibilities) & evaluation_points
-    ) / np.sum(evaluation_points)
-    all_frac_within = []
-    all_jaccard = []
-
-    for thresh in [4, 8, 16, 32, 64]:
-        within_dist = np.sum(
-            np.square(pred_points - target_points),
-            axis=-1,
-        ) < np.square(thresh)
-        is_correct = np.logical_and(within_dist, target_visibilities)
-        count_correct = np.sum(is_correct & evaluation_points)
-        count_visible_points = np.sum(target_visibilities & evaluation_points)
-        frac_correct = count_correct / count_visible_points
-        all_frac_within.append(frac_correct)
-
-        true_positives = np.sum(is_correct & pred_visibilities & evaluation_points)
-        gt_positives = np.sum(target_visibilities & evaluation_points)
-        false_positives = (~target_visibilities) & pred_visibilities
-        false_positives = false_positives | ((~within_dist) & pred_visibilities)
-        false_positives = np.sum(false_positives & evaluation_points)
-        jaccard = true_positives / (gt_positives + false_positives)
-        all_jaccard.append(jaccard)
-    AJ = np.mean(all_jaccard)
-    APCK = np.mean(all_frac_within)
-
-    print(f"2D tracking AJ: {AJ:.4f}")
-    print(f"2D tracking avg PCK: {APCK:.4f}")
-    print(f"2D tracking occlusion accuracy: {occ_acc:.4f}")
-    print("-----------------------------")
-    return AJ, APCK, occ_acc
-
-
-def evaluate_nv(data_dict, result_dict):
-    device = "cuda"
-    psnr_metric = mPSNR().to(device)
-    ssim_metric = mSSIM().to(device)
-    lpips_metric = mLPIPS().to(device)
-
-    val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device)
-    val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device)
-    pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device)
-
-    for i in range(len(val_imgs)):
-        val_img = val_imgs[i] / 255.0
-        pred_val_img = pred_val_imgs[i] / 255.0
-        val_covisible = val_covisibles[i] / 255.0
-        psnr_metric.update(val_img, pred_val_img, val_covisible)
-        ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None])
-        lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None])
-    mpsnr = psnr_metric.compute().item()
-    mssim = ssim_metric.compute().item()
-    mlpips = lpips_metric.compute().item()
-    print(f"NV mPSNR: {mpsnr:.4f}")
-    print(f"NV mSSIM: {mssim:.4f}")
-    print(f"NV mLPIPS: {mlpips:.4f}")
-    return mpsnr, mssim, mlpips
-
-
-if __name__ == "__main__":
-    seq_names = args.seq_names
-
-    epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], []
-    AJ_all, APCK_all, occ_acc_all = [], [], []
-    mpsnr_all, mssim_all, mlpips_all = [], [], []
-
-    for seq_name in seq_names:
-        print("=========================================")
-        print(f"Evaluating {seq_name}")
-        print("=========================================")
-        data_dir = osp.join(args.data_dir, seq_name)
-        if not osp.exists(data_dir):
-            data_dir = args.data_dir
-        if not osp.exists(data_dir):
-            raise ValueError(f"Data directory {data_dir} not found.")
-        result_dir = osp.join(args.result_dir, seq_name, "results/")
-        if not osp.exists(result_dir):
-            result_dir = osp.join(args.result_dir, "results/")
-        if not osp.exists(result_dir):
-            raise ValueError(f"Result directory {result_dir} not found.")
-
-        with open(osp.join(data_dir, "splits/train.json")) as f:
-            train_names = json.load(f)["frame_names"]
-        with open(osp.join(data_dir, "splits/val.json")) as f:
-            val_names = json.load(f)["frame_names"]
-
-        data_dict = load_data_dict(data_dir, train_names, val_names)
-        result_dict = load_result_dict(result_dir, val_names)
-        if result_dict["pred_keypoints_3d"] is not None:
-            epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict)
-            AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict)
-            epe_all.append(epe)
-            pck_3d_10cm_all.append(pck_3d_10cm)
-            pck_3d_5cm_all.append(pck_3d_5cm)
-            AJ_all.append(AJ)
-            APCK_all.append(APCK)
-            occ_acc_all.append(occ_acc)
-        if len(data_dict["val_imgs"]) > 0:
-            if result_dict["pred_val_imgs"] is None:
-                print("No NV results found.")
-                continue
-            mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict)
-            mpsnr_all.append(mpsnr)
-            mssim_all.append(mssim)
-            mlpips_all.append(mlpips)
-
-    print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}")
-    print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}")
-    print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}")
-    print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}")
-    print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}")
-    print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}")
-    print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}")
-    print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}")
-    print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}")
diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/__init__.py b/som_out/bear/code/2024-10-26-010130/flow3d/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/configs.cpython-310.pyc
deleted file mode 100644
index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/init_utils.cpython-310.pyc
deleted file mode 100644
index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/loss_utils.cpython-310.pyc
deleted file mode 100644
index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/metrics.cpython-310.pyc
deleted file mode 100644
index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ
diff --git
a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/params.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/configs.py b/som_out/bear/code/2024-10-26-010130/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig - motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float = 
0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/data/__init__.py b/som_out/bear/code/2024-10-26-010130/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 index 0e12be124c07bc40149b0b8bcafd1413e94f8fd0..0000000000000000000000000000000000000000 Binary files 
a/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/colmap.cpython-310.pyc deleted file mode 100644 index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc deleted file mode 100644 index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/data/base_dataset.py b/som_out/bear/code/2024-10-26-010130/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... - - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ... 
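# A small usage sketch, not part of the original file: train_collate_fn, defined
# just below, default-collates fixed-shape keys but keeps ragged per-sample keys
# such as "query_tracks_2d" as plain Python lists, since the number of tracks
# differs per frame. The batch contents here are made up for illustration.
import torch
from flow3d.data.base_dataset import BaseDataset

batch = [
    {"imgs": torch.zeros(48, 64, 3), "query_tracks_2d": torch.zeros(100, 2)},
    {"imgs": torch.zeros(48, 64, 3), "query_tracks_2d": torch.zeros(57, 2)},
]
collated = BaseDataset.train_collate_fn(batch)
assert collated["imgs"].shape == (2, 48, 64, 3)  # stacked by default_collate
assert [t.shape[0] for t in collated["query_tracks_2d"]] == [100, 57]  # left ragged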
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/data/casual_dataset.py b/som_out/bear/code/2024-10-26-010130/flow3d/data/casual_dataset.py deleted file mode 100644 index e378c6d2408c8ae5d2cdaf45cf6f77dead5c7bd2..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything" #"aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything", #"aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = 
depth_type - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return self.depths[index] / 
self.scale - - def load_image(self, index) -> torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, dtype=torch.float32), - 
torch.arange(0, H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/data/colmap.py b/som_out/bear/code/2024-10-26-010130/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = {v.name: k for k, v in 
images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
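    Example, matching the calls made elsewhere in this module:
    read_next_bytes(fid, 8, "Q") unpacks a single little-endian uint64, and
    read_next_bytes(fid, 24, "iiQQ") unpacks two int32 values followed by two
    uint64 values (4 + 4 + 8 + 8 = 24 bytes).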
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/data/iphone_dataset.py b/som_out/bear/code/2024-10-26-010130/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.scene_norm_dict = 
scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
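# Note on load_depth below: the monocular depth files store disparity (inverse
# depth), so values are clamped to at least 1e-3 before inverting; e.g. a raw
# disparity of 5e-4 becomes 1e-3 and maps to a depth of 1000, which bounds the
# far plane instead of letting near-zero disparities blow up.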
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
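# In brief, the block below centers the subsampled foreground tracks, takes
# half the largest per-axis spread between their 5% and 95% quantiles as the
# scene scale, and builds R as the axis-angle rotation taking the average
# camera up direction (the negated y-rows of the w2c matrices) to world +z,
# so that rt_to_mat4(R, -R @ scene_center) recenters and uprights the scene.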
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
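# Format reminder for the loop below: each {query_frame}_{target_frame}.npy
# track file holds an (N, 4) float array per query point: pixel x, pixel y, an
# occlusion score, and an expected-distance uncertainty; the last two channels
# are what parse_tapir_track_info converts into visibility and confidence masks.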
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
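# Sampling convention assumed below: F.grid_sample expects grid coordinates in
# [-1, 1], so normalize_coords presumably rescales pixel (x, y) along the lines
# of x_norm = 2 * x / (W - 1) - 1 (consistent with align_corners=True), letting
# a single bilinear lookup gather per-track RGB values from the query image.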
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
-                self.val_dataset = TensorDataset(torch.zeros(0))  # type: ignore
-        self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset)
-        self.video_dataset = self.train_dataset.get_video_dataset()
-        guru.success("Loading finished!")
-
-    def train_dataloader(self) -> DataLoader:
-        return DataLoader(
-            self.train_dataset,
-            batch_size=self.batch_size,
-            num_workers=self.num_workers,
-            collate_fn=iPhoneDataset.train_collate_fn,
-        )
-
-    def val_dataloader(self) -> list[DataLoader]:
-        return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)]
-"""
diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/data/utils.py b/som_out/bear/code/2024-10-26-010130/flow3d/data/utils.py
deleted file mode 100644
index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000
--- a/som_out/bear/code/2024-10-26-010130/flow3d/data/utils.py
+++ /dev/null
@@ -1,360 +0,0 @@
-from typing import List, Optional, Tuple, TypedDict
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch.nn.modules.utils import _pair, _quadruple
-
-UINT16_MAX = 65535
-
-
-class SceneNormDict(TypedDict):
-    scale: float
-    transfm: torch.Tensor
-
-
-def to_device(batch, device):
-    if isinstance(batch, dict):
-        return {k: to_device(v, device) for k, v in batch.items()}
-    if isinstance(batch, (list, tuple)):
-        return [to_device(v, device) for v in batch]
-    if isinstance(batch, torch.Tensor):
-        return batch.to(device)
-    return batch
-
-
-def normalize_coords(coords, h, w):
-    assert coords.shape[-1] == 2
-    return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0
-
-
-def postprocess_occlusions(occlusions, expected_dist):
-    """Postprocess occlusions to boolean visible flag.
-
-    Args:
-        occlusions: [-inf, inf], np.float32
-        expected_dist: [-inf, inf], np.float32
-
-    Returns:
-        visibles: bool
-    """
-
-    def sigmoid(x):
-        if isinstance(x, np.ndarray):
-            return 1 / (1 + np.exp(-x))
-        else:
-            return torch.sigmoid(x)
-
-    visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5
-    return visibles
-
-
-def parse_tapir_track_info(occlusions, expected_dist):
-    """
-    return:
-        valid_visible: mask of visible & confident points
-        valid_invisible: mask of invisible & confident points
-        confidence: clamped confidence scores (all < 0.5 -> 0)
-    """
-    visibility = 1 - F.sigmoid(occlusions)
-    confidence = 1 - F.sigmoid(expected_dist)
-    valid_visible = visibility * confidence > 0.5
-    valid_invisible = (1 - visibility) * confidence > 0.5
-    # set all confidence < 0.5 to 0
-    confidence = confidence * (valid_visible | valid_invisible).float()
-    return valid_visible, valid_invisible, confidence
-
-
-def get_tracks_3d_for_query_frame(
-    query_index: int,
-    query_img: torch.Tensor,
-    tracks_2d: torch.Tensor,
-    depths: torch.Tensor,
-    masks: torch.Tensor,
-    inv_Ks: torch.Tensor,
-    c2ws: torch.Tensor,
-):
-    """
-    :param query_index (int)
-    :param query_img [H, W, 3]
-    :param tracks_2d [N, T, 4]
-    :param depths [T, H, W]
-    :param masks [T, H, W]
-    :param inv_Ks [T, 3, 3]
-    :param c2ws [T, 4, 4]
-    returns (
-        tracks_3d [N, T, 3]
-        track_colors [N, 3]
-        visibles [N, T]
-        invisibles [N, T]
-        confidences [N, T]
-    )
-    """
-    T, H, W = depths.shape
-    query_img = query_img[None].permute(0, 3, 1, 2)  # (1, 3, H, W)
-    tracks_2d = tracks_2d.swapaxes(0, 1)  # (T, N, 4)
-    tracks_2d, occs, dists = (
-        tracks_2d[..., :2],
-        tracks_2d[..., 2],
-        tracks_2d[..., 3],
-    )
-    # visibles = postprocess_occlusions(occs, dists)
-    # (T, N), (T, N), (T, N)
-    visibles,
invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - track_depths = F.grid_sample( - depths[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths[..., None] - ) - tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[ - ..., :3 - ] - # Filter out out-of-mask tracks. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - is_in_masks = ( - F.grid_sample( - masks[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - )[:, 0, 0] - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - - # valid if in the fg mask at least 40% of the time - # in_mask_counts = is_in_masks.sum(0) - # t = 0.25 - # thresh = min(t * T, in_mask_counts.float().quantile(t).item()) - # valid = in_mask_counts > thresh - valid = is_in_masks[query_index] - # valid if visible 5% of the time - visible_counts = visibles.sum(0) - valid = valid & ( - visible_counts - >= min( - int(0.05 * T), - visible_counts.float().quantile(0.1).item(), - ) - ) - - # Get track's color from the query frame. - # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. 
Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. - If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. 
-def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] - ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/init_utils.py b/som_out/bear/code/2024-10-26-010130/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from 
flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes -from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - -def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor -) -> GaussianParams: - """ - using dataclasses individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. - opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - -def init_bg( - points: StaticObservations, -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkdg_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkdg_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals. 
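# Sketch of the rotation-from-normal construction used just below: the axis
# is z x n, the angle is arccos(z . n), and the axis-angle vector converts to
# a unit quaternion (roma returns xyzw; .roll(1) reorders to wxyz). Toy
# normal; degenerate when n is exactly +/-z, which the real code shares.
import roma
import torch
import torch.nn.functional as F

z = torch.tensor([[0.0, 0.0, 1.0]])
n = F.normalize(torch.tensor([[0.3, -0.2, 0.9]]), dim=-1)
axis = F.normalize(torch.cross(z, n, dim=-1), dim=-1)
angle = (z * n).sum(-1, keepdim=True).acos()
quat_wxyz = roma.rotvec_to_unitquat(axis * angle).roll(1, dims=-1)
print(quat_wxyz)  # rotating +z by this quaternion yields approximately n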
- local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t - 
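# `solve_procrustes` below is flow3d's own solver; for reference, a minimal
# weighted Kabsch/Procrustes step (rotation + translation, no scale) mapping
# point set src onto dst can be sketched as follows. Names here are
# illustrative, not the library API.
import torch

def weighted_kabsch(src, dst, w):  # src, dst: (N, 3); w: (N,) nonnegative
    w = w / w.sum().clamp(min=1e-8)
    mu_s = (w[:, None] * src).sum(0)
    mu_d = (w[:, None] * dst).sum(0)
    cov = (dst - mu_d).T @ (w[:, None] * (src - mu_s))  # (3, 3)
    U, _, Vh = torch.linalg.svd(cov)
    U[:, -1] *= torch.sign(torch.linalg.det(U @ Vh))  # guard against reflections
    R = U @ Vh
    t = mu_d - R @ mu_s
    return R, t  # dst ~= src @ R.T + t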
-            procrustes_weights = (
-                weights[cano_t]
-                * weights[cur_t]
-                * (confidences[cano_t] + confidences[cur_t])
-                / 2
-            )
-            if procrustes_weights.sum() < min_mean_weight * num_frames:
-                init_rots[n, cur_t] = init_rots[n, prev_t]
-                init_ts[n, cur_t] = init_ts[n, prev_t]
-                cluster_skip_ts.append(cur_t)
-            else:
-                se3, (err, err_before) = solve_procrustes(
-                    cluster[cano_t],
-                    cluster[cur_t],
-                    weights=procrustes_weights,
-                    enforce_se3=True,
-                    rot_type=rot_type,
-                )
-                init_rot, init_t, _ = se3
-                assert init_rot.shape[-1] == rot_dim
-                # double cover
-                if rot_type == "quat" and torch.linalg.norm(
-                    init_rot - init_rots[n][prev_t]
-                ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]):
-                    init_rot = -init_rot
-                init_rots[n, cur_t] = init_rot
-                init_ts[n, cur_t] = init_t
-                if np.isnan(err):
-                    print(f"{cur_t=} {err=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                if np.isnan(err_before):
-                    print(f"{cur_t=} {err_before=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                errs_after[n, cur_t] = err
-                errs_before[n, cur_t] = err_before
-                prev_t = cur_t
-        skipped_ts[cluster_id.item()] = cluster_skip_ts
-
-    guru.info(f"{skipped_ts=}")
-    guru.info(
-        "procrustes init median error: {:.5f} => {:.5f}".format(
-            np.median(errs_before[errs_before > 0]),
-            np.median(errs_after[errs_after > 0]),
-        )
-    )
-    guru.info(
-        "procrustes init mean error: {:.5f} => {:.5f}".format(
-            np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0])
-        )
-    )
-    guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}")
-
-    if vis:
-        server = get_server(port)
-        center_idcs = torch.argmin(dists2centers, dim=0)
-        print(f"{dists2centers.shape=} {center_idcs.shape=}")
-        vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs])
-        vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks")
-        import ipdb
-
-        ipdb.set_trace()
-
-    bases = MotionBases(init_rots, init_ts)
-    return bases, motion_coefs, tracks_3d
-
-
-def run_initial_optim(
-    fg: GaussianParams,
-    bases: MotionBases,
-    tracks_3d: TrackObservations,
-    Ks: torch.Tensor,
-    w2cs: torch.Tensor,
-    num_iters: int = 1000,
-    use_depth_range_loss: bool = False,
-):
-    """
-    :param motion_rots: [num_bases, num_frames, 4|6]
-    :param motion_transls: [num_bases, num_frames, 3]
-    :param motion_coefs: [num_bases, num_frames]
-    :param means: [num_gaussians, 3]
-    """
-    optimizer = torch.optim.Adam(
-        [
-            {"params": bases.params["rots"], "lr": 1e-2},
-            {"params": bases.params["transls"], "lr": 3e-2},
-            {"params": fg.params["motion_coefs"], "lr": 1e-2},
-            {"params": fg.params["means"], "lr": 1e-3},
-        ],
-    )
-    scheduler = torch.optim.lr_scheduler.ExponentialLR(
-        optimizer, gamma=0.1 ** (1 / num_iters)
-    )
-    G = fg.params["means"].shape[0]
-    num_frames = bases.num_frames
-    device = bases.params["rots"].device
-
-    w_smooth_func = lambda i, min_v, max_v, th: (
-        min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v
-    )
-
-    gt_2d, gt_depth = project_2d_tracks(
-        tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True
-    )
-    # (G, T, 2)
-    gt_2d = gt_2d.swapaxes(0, 1)
-    # (G, T)
-    gt_depth = gt_depth.swapaxes(0, 1)
-
-    ts = torch.arange(0, num_frames, device=device)
-    ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2)
-    ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1))  # (3B,)
-
-    pbar = tqdm(range(0, num_iters))
-    for i in pbar:
-        coefs = fg.get_coefs()
-        transfms = bases.compute_transforms(ts, coefs)
-        positions = torch.einsum(
-            "pnij,pj->pni",
-            transfms,
-            F.pad(fg.params["means"], (0,
1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. - w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = 
init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / 
(cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/loss_utils.py b/som_out/bear/code/2024-10-26-010130/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return 
torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - 
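# Toy check of the ray projection used in the loss terms below, assuming a
# single Gaussian and a camera looking down +z: only the depth component of
# the frame-to-frame displacement is penalized; in-plane motion is free.
import torch

ray = torch.tensor([0.0, 0.0, 1.0])
disp = torch.tensor([0.2, -0.1, 0.05])  # hypothetical displacement
print(((disp * ray).sum() ** 2).item())  # 0.0025: only dz = 0.05 contributes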
# acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/metrics.py b/som_out/bear/code/2024-10-26-010130/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/metrics.py +++ /dev/null @@ -1,313 +0,0 @@ -from typing import Literal - -import numpy as np -import torch -import torch.nn.functional as F -from torchmetrics.functional.image.lpips import _NoTrainLpips -from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure -from torchmetrics.metric import Metric -from torchmetrics.utilities import dim_zero_cat -from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE - - -def compute_psnr( - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, -) -> float: - """ - Args: - preds (torch.Tensor): (..., 3) predicted images in [0, 1]. - targets (torch.Tensor): (..., 3) target images in [0, 1]. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - - Returns: - psnr (float): Peak signal-to-noise ratio. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - return ( - -10.0 - * torch.log( - F.mse_loss( - preds * masks[..., None], - targets * masks[..., None], - reduction="sum", - ) - / masks.sum().clamp(min=1.0) - / 3.0 - ) - / np.log(10.0) - ).item() - - -def compute_pose_errors( - preds: torch.Tensor, targets: torch.Tensor -) -> tuple[float, float, float]: - """ - Args: - preds: (N, 4, 4) predicted camera poses. - targets: (N, 4, 4) target camera poses. - - Returns: - ate (float): Absolute trajectory error. - rpe_t (float): Relative pose error in translation. - rpe_r (float): Relative pose error in rotation (degree). - """ - # Compute ATE. - ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item() - # Compute RPE_t and RPE_r. - # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r. - # torch has numerical issues for acos when the value is close to 1.0, i.e. - # RPE_r is supposed to be very small, and will result in artificially large - # error. 
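# Sketch of the trace-to-angle identity used below: for a relative rotation
# R_err, angle = arccos((trace(R_err) - 1) / 2). Done in numpy since, as the
# note above says, acos is numerically touchy near 1. Toy 5-degree rotation:
import numpy as np

th = np.deg2rad(5.0)
R_err = np.array(
    [
        [np.cos(th), -np.sin(th), 0.0],
        [np.sin(th), np.cos(th), 0.0],
        [0.0, 0.0, 1.0],
    ]
)
tr = np.trace(R_err)
print(np.rad2deg(np.arccos(np.clip((tr - 1.0) / 2.0, -1.0, 1.0))))  # ~5.0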
-    preds = preds.detach().cpu().numpy()
-    targets = targets.detach().cpu().numpy()
-    pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:]
-    target_rels = np.linalg.inv(targets[:-1]) @ targets[1:]
-    error_rels = np.linalg.inv(target_rels) @ pred_rels
-    traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1)
-    rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item()
-    rpe_r = (
-        np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item()
-        / np.pi
-        * 180.0
-    )
-    return ate, rpe_t, rpe_r
-
-
-class mPSNR(PeakSignalNoiseRatio):
-    sum_squared_error: list[torch.Tensor]
-    total: list[torch.Tensor]
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            data_range=1.0,
-            base=10.0,
-            dim=None,
-            reduction="elementwise_mean",
-            **kwargs,
-        )
-        self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat")
-        self.add_state("total", default=[], dist_reduce_fx="cat")
-
-    def __len__(self) -> int:
-        return len(self.total)
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (..., 3) float32 predicted images.
-            targets (torch.Tensor): (..., 3) float32 target images.
-            masks (torch.Tensor | None): (...,) optional binary masks where the
-                1-regions will be taken into account.
-        """
-        if masks is None:
-            masks = torch.ones_like(preds[..., 0])
-        self.sum_squared_error.append(
-            torch.sum(torch.pow((preds - targets) * masks[..., None], 2))
-        )
-        self.total.append(masks.sum().to(torch.int64) * 3)
-
-    def compute(self) -> torch.Tensor:
-        """Compute peak signal-to-noise ratio over state."""
-        sum_squared_error = dim_zero_cat(self.sum_squared_error)
-        total = dim_zero_cat(self.total)
-        return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0)
-
-
-class mSSIM(StructuralSimilarityIndexMeasure):
-    similarity: list
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            reduction=None,
-            data_range=1.0,
-            return_full_image=False,
-            **kwargs,
-        )
-        assert isinstance(self.sigma, float)
-
-    def __len__(self) -> int:
-        return sum([s.shape[0] for s in self.similarity])
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (B, H, W, 3) float32 predicted images.
-            targets (torch.Tensor): (B, H, W, 3) float32 target images.
-            masks (torch.Tensor | None): (B, H, W) optional binary masks where
-                the 1-regions will be taken into account.
-        """
-        if masks is None:
-            masks = torch.ones_like(preds[..., 0])
-
-        # Construct a 1D Gaussian blur filter.
-        assert isinstance(self.kernel_size, int)
-        hw = self.kernel_size // 2
-        shift = (2 * hw - self.kernel_size + 1) / 2
-        assert isinstance(self.sigma, float)
-        f_i = (
-            (torch.arange(self.kernel_size, device=preds.device) - hw + shift)
-            / self.sigma
-        ) ** 2
-        filt = torch.exp(-0.5 * f_i)
-        filt /= torch.sum(filt)
-
-        # Blur in x and y (faster than the 2D convolution).
-        def convolve2d(z, m, f):
-            # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf).
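# Why two 1D passes: a 2D Gaussian kernel is separable, so filtering rows
# then columns with the 1D filter reproduces the full 2D convolution. Quick
# unmasked sanity check with plain conv2d on toy data:
import torch
import torch.nn.functional as F

f = torch.tensor([0.25, 0.5, 0.25])
img = torch.rand(1, 1, 8, 8)
sep = F.conv2d(F.conv2d(img, f.view(1, 1, 1, 3)), f.view(1, 1, 3, 1))
full = F.conv2d(img, torch.outer(f, f).view(1, 1, 3, 3))
print(torch.allclose(sep, full, atol=1e-6))  # True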
- z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. - # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. 
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. - """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/params.py b/som_out/bear/code/2024-10-26-010130/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if 
f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] 
== 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/renderer.py b/som_out/bear/code/2024-10-26-010130/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. - work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/scene_model.py b/som_out/bear/code/2024-10-26-010130/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def 
__init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = 
GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). - target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = 
colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. - self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/tensor_dataclass.py b/som_out/bear/code/2024-10-26-010130/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
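# `map` recurses through tensors, nested TensorDataclass instances,
# lists/tuples, and plain dicts, rebuilding each container and passing any
# other leaf through unchanged. A minimal usage sketch (illustrative shapes,
# P=8 tracks over T=4 frames):
#   obs = TrackObservations(xyz=torch.zeros(8, 4, 3), visibles=torch.ones(8, 4),
#                           invisibles=torch.zeros(8, 4), confidences=torch.ones(8, 4),
#                           colors=torch.rand(8, 3))
#   obs_gpu = obs.to("cuda")  # same dataclass, every tensor moved
#   head = obs[:4]            # same dataclass, leading dim sliced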
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/trainer.py b/som_out/bear/code/2024-10-26-010130/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
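# Query poses at every target timestep in one batched call: `target_ts_vec`
# concatenates the per-sample target times, so with B=2 samples and N=4
# targets each, `compute_poses_all` sees 8 timesteps and returns means of
# shape (G, 8, 3), which are transposed and split back into per-sample
# chunks of N below.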
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
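# Scatter 1s at the integer query-pixel locations, then tile the flattened
# (B, H * W) mask N times so it lines up row-for-row with the (B * N, H * W)
# predicted tracks; only the query pixels contribute to the 2D track loss.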
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
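# Each term above enters `loss` scaled by its `losses_cfg.w_*` weight; the
# per-term values logged below are the raw, un-weighted magnitudes, and only
# train/loss is the weighted total.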
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
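# Per gaussian, three running statistics are accumulated across rendered
# views: the screen-space gradient norm of the projected means, a visibility
# count, and the maximum observed screen radius. The densify/cull steps below
# threshold these quantities, in the spirit of the adaptive density control
# scheme from 3D Gaussian Splatting.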
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/trajectories.py b/som_out/bear/code/2024-10-26-010130/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1) - y = torch.cross(z, x, dim=-1) # (3) - avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center) - avg_w2c = torch.linalg.inv(avg_c2w) - return avg_w2c - - -def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor: - """Triangulate a set of rays to find a single lookat point. - - Args: - origins (torch.Tensor): A (N, 3) array of ray origins. - viewdirs (torch.Tensor): A (N, 3) array of ray view directions. - - Returns: - torch.Tensor: A (3,) lookat point. - """ - - viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1) - eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None] - # Calculate projection matrix I - rr^T - I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :]) - # Compute sum of projections - sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3) - # Solve for the intersection point using least squares - lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0] - # Check NaNs. - assert not torch.any(torch.isnan(lookat)) - return lookat - - -def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor): - """ - Args: - positions: (N, 3) tensor of camera positions - lookat: (3,) tensor of lookat point - up: (3,) tensor of up vector - - Returns: - w2cs: (N, 3, 3) tensor of world to camera rotation matrices - """ - forward_vectors = F.normalize(lookat - positions, dim=-1) - right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1) - down_vectors = F.normalize( - torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1 - ) - Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1) - w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions)) - return w2cs - - -def get_arc_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_position = torch.linalg.inv(ref_w2c)[:3, 3] - thetas = ( - torch.sin( - torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[ - :-1 - ] - ) - * (degree / 2.0) - / 180.0 - * torch.pi - ) - positions = torch.einsum( - "nij,j->ni", - roma.rotvec_to_rotmat(thetas[:, None] * up[None]), - ref_position - lookat, - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_lemniscate_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi) - # Lemniscate curve in camera space. Starting at the origin. - thetas = ( - torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1] - + torch.pi / 2 - ) - positions = torch.stack( - [ - a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2), - a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2), - torch.zeros(num_frames, device=ref_w2c.device), - ], - dim=-1, - ) - # Transform to world space. 
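# Homogeneous-coordinate trick: pad each camera-space point with a trailing
# 1 so a single einsum against the 3x4 block of ref_c2w applies rotation and
# translation together (p_world = R @ p_cam + t); the spiral path below
# reuses the same pattern.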
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/transforms.py b/som_out/bear/code/2024-10-26-010130/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transfm to be SE3. - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - q (torch.Tensor): (4,), rotation component in quaternion of WXYZ - format. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (torch.Tensor): (), average L2 distance after alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error. 
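# Sanity check: push `src` through the recovered similarity transform and
# measure the weighted L2 residual against `dst`, alongside the residual
# before alignment, e.g.:
#   sim3, (err_after, err_before) = solve_procrustes(src, dst)
# A successful alignment should give err_after <= err_before.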
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/validator.py b/som_out/bear/code/2024-10-26-010130/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
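# Datasets without an explicit validity mask fall back to an all-ones mask,
# so every pixel participates in the metric updates below.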
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
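# PCK treats a predicted keypoint as correct when it lands within a pixel
# threshold of the ground truth; here the threshold is 5% of the longer
# image side, i.e. max(img_wh) * 0.05.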
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
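# Each frame is the ground-truth image and the rendering concatenated side
# by side along the width axis, moved to CPU immediately to keep GPU memory
# flat over long sequences.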
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
- t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequences otherwise. - video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "motion_coefs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/vis/__init__.py b/som_out/bear/code/2024-10-26-010130/flow3d/vis/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/vis/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc deleted file mode 100644 index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/vis/__pycache__/render_panel.cpython-310.pyc deleted file mode 100644 index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/vis/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/bear/code/2024-10-26-010130/flow3d/vis/__pycache__/viewer.cpython-310.pyc deleted file mode 100644 index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-010130/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/vis/playback_panel.py b/som_out/bear/code/2024-10-26-010130/flow3d/vis/playback_panel.py deleted file mode 100644 index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/vis/playback_panel.py +++ /dev/null @@ -1,68 +0,0 @@ -import threading -import time - -import viser - - -def add_gui_playback_group( - server: viser.ViserServer, - num_frames: int, - min_fps: float = 1.0, - max_fps: float = 60.0, - fps_step: float = 0.1, - initial_fps: float = 10.0, -): -
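# Builds a timestep slider, prev/next frame buttons, pause/resume buttons, and
# an FPS slider, then starts a daemon thread that advances the slider during
# playback. A hypothetical usage sketch (not from the original file;
# `render_frame` is an assumed callback):
#
#   server = viser.ViserServer()
#   gui_timestep, *_ = add_gui_playback_group(server, num_frames=100)
#
#   @gui_timestep.on_update
#   def _(_) -> None:
#       render_frame(gui_timestep.value)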
gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/vis/render_panel.py b/som_out/bear/code/2024-10-26-010130/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
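# This module implements the camera-path ("Render") tab: `Keyframe` stores one
# camera pose plus its per-keyframe overrides, `CameraPath` interpolates the
# keyframes with Kochanek-Bartels splines, and `populate_render_tab` wires the
# viser GUI around them.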
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
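# Calling `add_camera` again with an existing `keyframe_index` replaces that
# keyframe in place: the GUI callbacks below rely on this to re-render the
# frustum after edits, and the entry in `self._keyframes` is overwritten at
# the end of this method.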
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
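# This walks the SE(3) geodesic between the two poses: with
# T_rel = T_world_current^-1 @ T_world_target, the pose at fraction s in
# [0, 1] is T(s) = T_world_current @ exp(s * log(T_rel)), evaluated here at
# s = j / 9 over ten steps at roughly 30 Hz.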
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
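# Illustrative example (not from the original file): with cumulative
# transition times [0.0, 2.0, 6.0] and spline indices [0, 1, 2], PCHIP maps
# t = 4.0 s to a value between 1 and 2, and its monotonicity guarantees the
# camera never moves backward along the spline as time increases.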
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
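# Scene handles are not mutated in place: the spline visualization and its
# point cloud are torn down and rebuilt from scratch every time the path
# changes.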
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
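# The keyframe's scene time defaults to 0 and is taken from the timestep
# slider when one is attached, so dynamic scenes replay the time that was
# showing when each keyframe was captured.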
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
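# `_make_transform_controls_callback` exists to bind each (keyframe, controls)
# pair at definition time; registering the handler directly inside the loop
# below would capture the loop variables late, leaving every callback pointing
# at the last keyframe.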
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
- Returns current camera pose + FOV if available.""" - - if preview_frame_slider is None: - return - maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad( - preview_frame_slider.value / get_max_frame_index() - ) - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - render_tab_state.preview_fov = fov_rad - render_tab_state.preview_aspect = camera_path.get_aspect() - render_tab_state.preview_camera_type = camera_type.value - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - return pose, fov_rad, time - - def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]: - """Helper for creating the current frame # slider. This is removed and - re-added anytime the `max` value changes.""" - - with playback_folder: - preview_frame_slider = server.gui.add_slider( - "Preview frame", - min=0, - max=get_max_frame_index(), - step=1, - initial_value=0, - # Place right after the pause button. - order=preview_render_stop_button.order + 0.01, - disabled=get_max_frame_index() == 1, - ) - play_button.disabled = preview_frame_slider.disabled - preview_render_button.disabled = preview_frame_slider.disabled - - @preview_frame_slider.on_update - def _(_) -> None: - nonlocal preview_camera_handle - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - - preview_camera_handle = server.scene.add_camera_frustum( - "/preview_camera", - fov=fov_rad, - aspect=resolution.value[0] / resolution.value[1], - scale=0.35, - wxyz=pose.rotation().wxyz, - position=pose.translation(), - color=(10, 200, 30), - ) - if render_tab_state.preview_render: - for client in server.get_clients().values(): - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - return preview_frame_slider - - # We back up the camera poses before and after we start previewing renders. - camera_pose_backup_from_id: Dict[int, tuple] = {} - - @preview_render_button.on_click - def _(_) -> None: - render_tab_state.preview_render = True - preview_render_button.visible = False - preview_render_stop_button.visible = True - - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov, time = maybe_pose_and_fov_rad_and_time - del fov - - # Hide all scene nodes when we're previewing the render. - server.scene.set_global_visibility(True) - - # Back up and then set camera poses. - for client in server.get_clients().values(): - camera_pose_backup_from_id[client.client_id] = ( - client.camera.position, - client.camera.look_at, - client.camera.up_direction, - ) - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - @preview_render_stop_button.on_click - def _(_) -> None: - render_tab_state.preview_render = False - preview_render_button.visible = True - preview_render_stop_button.visible = False - - # Revert camera poses. 
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera() # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play).start() - - # Play the camera trajectory when the play button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # add button for loading existing path - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - camera_path_dir = datapath.parent - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # load the json file - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # apply the x rotation by 180 deg - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_Fov values will not be exactly matched. 
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/vis/utils.py b/som_out/bear/code/2024-10-26-010130/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, occ, radius=radius) 
- out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/bear/code/2024-10-26-010130/flow3d/vis/viewer.py b/som_out/bear/code/2024-10-26-010130/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/bear/code/2024-10-26-010130/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/bear/code/2024-10-26-010130/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/bear/code/2024-10-26-010130/scripts/evaluate_iphone.py b/som_out/bear/code/2024-10-26-010130/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-010130/scripts/evaluate_iphone.py +++ /dev/null @@ 
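Wiring up the DynamicViewer above takes a viser server and a render callback. A minimal sketch, assuming nerfview invokes render_fn as render_fn(camera_state, img_wh) with img_wh as (width, height); render_black, num_frames, and work_dir here are placeholders:

import numpy as np
import viser
from nerfview import CameraState

def render_black(camera_state, img_wh):
    # Placeholder renderer: returns an (H, W, 3) uint8 frame.
    w, h = img_wh
    return np.zeros((h, w, 3), dtype=np.uint8)

server = viser.ViserServer(port=8080)
viewer = DynamicViewer(
    server, render_black, num_frames=80, work_dir="./work_dir", mode="rendering"
)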
-1,447 +0,0 @@ -import argparse -import json -import os.path as osp -from glob import glob -from itertools import product - -import cv2 -import imageio.v3 as iio -import numpy as np -import roma -import torch -from tqdm import tqdm - -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.metrics import mLPIPS, mPSNR, mSSIM -from flow3d.transforms import rt_to_mat4, solve_procrustes - -parser = argparse.ArgumentParser() -parser.add_argument( - "--data_dir", - type=str, - help="Path to the data directory that contains all the sequences.", -) -parser.add_argument( - "--result_dir", - type=str, - help="Path to the result directory that contains the results. " - "For batch evaluation, result_dir should contain subdirectories for each sequence (result_dir/seq_name/results); " - "for single-sequence evaluation, result_dir should contain results directly (result_dir/results).", -) -parser.add_argument( - "--seq_names", - type=str, - nargs="+", - default=[ - "apple", - "backpack", - "block", - "creeper", - "handwavy", - "haru-sit", - "mochi-high-five", - "paper-windmill", - "pillow", - "spin", - "sriracha-tree", - "teddy", - ], - help="Sequence names to evaluate.", -) -args = parser.parse_args() - - -def load_data_dict(data_dir, train_names, val_names): - val_imgs = np.array( - [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names] - ) - val_covisibles = np.array( - [ - iio.imread( - osp.join( - data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png" - ) - ) - for name in tqdm(val_names, desc="Loading val covisibles") - ] - ) - train_depths = np.array( - [ - np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0] - for name in train_names - ] - ) - train_Ks, train_w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [name + ".png" for name in train_names], - ) - train_Ks = train_Ks[:, :3, :3] - scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item() - train_c2ws = np.linalg.inv(train_w2cs) - train_c2ws[:, :3, -1] *= scale - train_w2cs = np.linalg.inv(train_c2ws) - keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json"))) - keypoints_2d = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints_2d.append(json.load(f)) - keypoints_2d = np.array(keypoints_2d) - keypoints_2d[..., :2] *= 2.0 - time_ids = np.array( - [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths] - ) - time_pairs = np.array(list(product(time_ids, repeat=2))) - index_pairs = np.array(list(product(range(len(time_ids)), repeat=2))) - keypoints_3d = [] - for i, kps_2d in zip(time_ids, keypoints_2d): - K = train_Ks[i] - w2c = train_w2cs[i] - depth = train_depths[i] - is_kp_visible = kps_2d[:, 2] == 1 - is_depth_valid = ( - cv2.remap( - (depth != 0).astype(np.float32), - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - == 1 - ) - kp_depths = cv2.remap( - depth, # type: ignore - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - kps_3d = ( - np.einsum( - "ij,pj->pi", - np.linalg.inv(K), - np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1), - ) - * kp_depths[:, None] - ) - kps_3d = np.einsum( - "ij,pj->pi", - np.linalg.inv(w2c)[:3], - np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1), - ) - kps_3d = np.concatenate( - [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1 - ) -
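The keypoint lifting above is standard back-projection: a pixel (u, v) with depth d maps to camera coordinates via K^-1 and on to world coordinates via the inverse extrinsics. The same math in isolation (lift_to_world is an illustrative helper, not from this repo):

import numpy as np

def lift_to_world(uv, depth, K, w2c):
    # uv: (N, 2) pixels, depth: (N,), K: (3, 3), w2c: (4, 4) -> (N, 3) world points.
    uv1 = np.pad(uv, ((0, 0), (0, 1)), constant_values=1)  # homogeneous pixels
    cam = np.einsum("ij,pj->pi", np.linalg.inv(K), uv1) * depth[:, None]
    cam1 = np.pad(cam, ((0, 0), (0, 1)), constant_values=1)
    return np.einsum("ij,pj->pi", np.linalg.inv(w2c)[:3], cam1)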
kps_3d[kps_3d[:, -1] != 1] = 0.0 - keypoints_3d.append(kps_3d) - keypoints_3d = np.array(keypoints_3d) - return { - "val_imgs": val_imgs, - "val_covisibles": val_covisibles, - "train_depths": train_depths, - "train_Ks": train_Ks, - "train_w2cs": train_w2cs, - "keypoints_2d": keypoints_2d, - "keypoints_3d": keypoints_3d, - "time_ids": time_ids, - "time_pairs": time_pairs, - "index_pairs": index_pairs, - } - - -def load_result_dict(result_dir, val_names): - try: - pred_val_imgs = np.array( - [ - iio.imread(osp.join(result_dir, "rgb", f"{name}.png")) - for name in val_names - ] - ) - except Exception: - pred_val_imgs = None - try: - keypoints_dict = np.load( - osp.join(result_dir, "keypoints.npz"), allow_pickle=True - ) - if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict: - keypoints_dict = keypoints_dict["arr_0"].item() - pred_keypoint_Ks = keypoints_dict["Ks"] - pred_keypoint_w2cs = keypoints_dict["w2cs"] - pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"] - pred_train_depths = keypoints_dict["pred_train_depths"] - except Exception: - print( - "No keypoints.npz found; make sure this is because the method itself cannot produce keypoints." - ) - keypoints_dict = {} - pred_keypoint_Ks = None - pred_keypoint_w2cs = None - pred_keypoints_3d = None - pred_train_depths = None - - if "visibilities" in list(keypoints_dict.keys()): - pred_visibilities = keypoints_dict["visibilities"] - else: - pred_visibilities = None - - return { - "pred_val_imgs": pred_val_imgs, - "pred_train_depths": pred_train_depths, - "pred_keypoint_Ks": pred_keypoint_Ks, - "pred_keypoint_w2cs": pred_keypoint_w2cs, - "pred_keypoints_3d": pred_keypoints_3d, - "pred_visibilities": pred_visibilities, - } - - -def evaluate_3d_tracking(data_dict, result_dict): - train_Ks = data_dict["train_Ks"] - train_w2cs = data_dict["train_w2cs"] - keypoints_3d = data_dict["keypoints_3d"] - time_ids = data_dict["time_ids"] - time_pairs = data_dict["time_pairs"] - index_pairs = data_dict["index_pairs"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"] - if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks): - print("Inconsistent camera intrinsics.") - print(train_Ks[time_ids][0], pred_keypoint_Ks[0]) - keypoint_w2cs = train_w2cs[time_ids] - q, t, s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0] - R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1)) - pred_keypoints_3d = np.einsum( - "ij,...j->...i", - rt_to_mat4(R, t, s).numpy().astype(np.float64), - np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1), - ) - pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:] - # Compute 3D tracking metrics.
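The alignment above fits a similarity transform (R, t, s) between predicted and ground-truth camera centers with solve_procrustes, packs it into one 4x4 via rt_to_mat4, and applies it to homogeneous keypoints. The apply step alone (apply_sim3 is an illustrative name):

import numpy as np

def apply_sim3(align_mat, pts):
    # align_mat: (4, 4) similarity transform, pts: (..., 3) -> (..., 3).
    pts_h = np.concatenate([pts, np.ones_like(pts[..., :1])], axis=-1)
    out = np.einsum("ij,...j->...i", align_mat, pts_h)
    return out[..., :3] / out[..., 3:]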
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
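project() above is the pinhole camera model batched over frame pairs; for a single camera it reduces to the following sketch (project_single is illustrative):

import numpy as np

def project_single(K, w2c, pts):
    # K: (3, 3), w2c: (4, 4), pts: (M, 3) -> pixels (M, 2), depths (M,).
    pts_h = np.pad(pts, ((0, 0), (0, 1)), constant_values=1)
    cam = pts_h @ w2c[:3].T  # world -> camera frame
    uvw = cam @ K.T  # camera -> image plane
    depth = uvw[:, 2]
    uv = uvw[:, :2] / np.clip(depth[:, None], 1e-6, None)
    return uv, depth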
None].repeat(num_pts, axis=-1) - evaluation_points = one_hot_eye == 0 - for i in range(num_frames): - evaluation_points[i, :, ~visibilities[i]] = False - occ_acc = np.sum( - np.equal(pred_visibilities, target_visibilities) & evaluation_points - ) / np.sum(evaluation_points) - all_frac_within = [] - all_jaccard = [] - - for thresh in [4, 8, 16, 32, 64]: - within_dist = np.sum( - np.square(pred_points - target_points), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, target_visibilities) - count_correct = np.sum(is_correct & evaluation_points) - count_visible_points = np.sum(target_visibilities & evaluation_points) - frac_correct = count_correct / count_visible_points - all_frac_within.append(frac_correct) - - true_positives = np.sum(is_correct & pred_visibilities & evaluation_points) - gt_positives = np.sum(target_visibilities & evaluation_points) - false_positives = (~target_visibilities) & pred_visibilities - false_positives = false_positives | ((~within_dist) & pred_visibilities) - false_positives = np.sum(false_positives & evaluation_points) - jaccard = true_positives / (gt_positives + false_positives) - all_jaccard.append(jaccard) - AJ = np.mean(all_jaccard) - APCK = np.mean(all_frac_within) - - print(f"2D tracking AJ: {AJ:.4f}") - print(f"2D tracking avg PCK: {APCK:.4f}") - print(f"2D tracking occlusion accuracy: {occ_acc:.4f}") - print("-----------------------------") - return AJ, APCK, occ_acc - - -def evaluate_nv(data_dict, result_dict): - device = "cuda" - psnr_metric = mPSNR().to(device) - ssim_metric = mSSIM().to(device) - lpips_metric = mLPIPS().to(device) - - val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device) - val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device) - pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device) - - for i in range(len(val_imgs)): - val_img = val_imgs[i] / 255.0 - pred_val_img = pred_val_imgs[i] / 255.0 - val_covisible = val_covisibles[i] / 255.0 - psnr_metric.update(val_img, pred_val_img, val_covisible) - ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - mpsnr = psnr_metric.compute().item() - mssim = ssim_metric.compute().item() - mlpips = lpips_metric.compute().item() - print(f"NV mPSNR: {mpsnr:.4f}") - print(f"NV mSSIM: {mssim:.4f}") - print(f"NV mLPIPS: {mlpips:.4f}") - return mpsnr, mssim, mlpips - - -if __name__ == "__main__": - seq_names = args.seq_names - - epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], [] - AJ_all, APCK_all, occ_acc_all = [], [], [] - mpsnr_all, mssim_all, mlpips_all = [], [], [] - - for seq_name in seq_names: - print("=========================================") - print(f"Evaluating {seq_name}") - print("=========================================") - data_dir = osp.join(args.data_dir, seq_name) - if not osp.exists(data_dir): - data_dir = args.data_dir - if not osp.exists(data_dir): - raise ValueError(f"Data directory {data_dir} not found.") - result_dir = osp.join(args.result_dir, seq_name, "results/") - if not osp.exists(result_dir): - result_dir = osp.join(args.result_dir, "results/") - if not osp.exists(result_dir): - raise ValueError(f"Result directory {result_dir} not found.") - - with open(osp.join(data_dir, "splits/train.json")) as f: - train_names = json.load(f)["frame_names"] - with open(osp.join(data_dir, "splits/val.json")) as f: - val_names = json.load(f)["frame_names"] - - data_dict = load_data_dict(data_dir, train_names, 
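The average-Jaccard bookkeeping above, condensed for one threshold: true positives are within-threshold predictions at ground-truth-visible points, and false positives are predicted-visible points that are occluded in the ground truth or outside the threshold. With same-shape boolean arrays (valid marks the evaluated entries):

import numpy as np

def jaccard_at_thresh(within, gt_vis, pred_vis, valid):
    tp = np.sum(within & gt_vis & pred_vis & valid)
    fp = np.sum((~gt_vis | ~within) & pred_vis & valid)
    gt_pos = np.sum(gt_vis & valid)
    return float(tp / (gt_pos + fp))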
val_names) - result_dict = load_result_dict(result_dir, val_names) - if result_dict["pred_keypoints_3d"] is not None: - epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict) - AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict) - epe_all.append(epe) - pck_3d_10cm_all.append(pck_3d_10cm) - pck_3d_5cm_all.append(pck_3d_5cm) - AJ_all.append(AJ) - APCK_all.append(APCK) - occ_acc_all.append(occ_acc) - if len(data_dict["val_imgs"]) > 0: - if result_dict["pred_val_imgs"] is None: - print("No NV results found.") - continue - mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict) - mpsnr_all.append(mpsnr) - mssim_all.append(mssim) - mlpips_all.append(mlpips) - - print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}") - print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}") - print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}") - print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}") - print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}") - print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}") - print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}") - print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}") - print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}") diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/__init__.py b/som_out/bear/code/2024-10-26-011247/flow3d/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git
a/som_out/bear/code/2024-10-26-011247/flow3d/configs.py b/som_out/bear/code/2024-10-26-011247/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig - motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float =
0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/data/__init__.py b/som_out/bear/code/2024-10-26-011247/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset
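get_train_val_datasets above dispatches on the type of the config dataclass. A sketch of driving it for a DAVIS-style sequence (the root_dir path is a placeholder); on the DAVIS branch only the train dataset is populated and the three validation views come back None:

cfg = DavisDataConfig(seq_name="bear", root_dir="/path/to/DAVIS")
train_dataset, train_video_view, val_img, val_kpt = get_train_val_datasets(cfg, load_val=False)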
diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/data/base_dataset.py b/som_out/bear/code/2024-10-26-011247/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... - - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ...
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/data/casual_dataset.py b/som_out/bear/code/2024-10-26-011247/flow3d/data/casual_dataset.py deleted file mode 100644 index e378c6d2408c8ae5d2cdaf45cf6f77dead5c7bd2..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything" #"aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything", #"aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = 
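train_collate_fn above batches tensor-valued keys with default_collate but deliberately keeps the variable-length track keys as plain Python lists, since their point counts differ per sample. A toy demonstration, assuming the BaseDataset class above is importable:

import torch

batch = [
    {"ts": torch.tensor(0), "query_tracks_2d": torch.zeros(5, 2)},
    {"ts": torch.tensor(1), "query_tracks_2d": torch.zeros(8, 2)},
]
collated = BaseDataset.train_collate_fn(batch)
# collated["ts"] -> tensor([0, 1]); collated["query_tracks_2d"] stays a list of two tensors.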
depth_type - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return self.depths[index] / 
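The scene normalization above maps everything into a canonical frame: each w2c is right-multiplied by the inverse of the normalizing transform, and camera translations are divided by the scene scale (get_depth divides depths by the same scale). In isolation:

import torch

def normalize_w2cs(w2cs, transfm, scale):
    # w2cs: (N, 4, 4), transfm: (4, 4) world-space transform, scale: scalar.
    out = torch.einsum("nij,jk->nik", w2cs, torch.linalg.inv(transfm))
    out[:, :3, 3] /= scale
    return out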
self.scale - - def load_image(self, index) -> torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, dtype=torch.float32), - 
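load_mask above builds a trimap: both the foreground and background are eroded so the uncertain boundary band becomes 0, with fg = 1 and bg = -1. The same construction standalone:

import cv2
import numpy as np

def make_trimap(fg_mask, radius=3):
    # fg_mask: (H, W) bool -> float32 trimap with {1: fg, -1: bg, 0: unknown}.
    kernel = np.ones((radius, radius), np.uint8)
    fg = cv2.erode(fg_mask.astype(np.uint8), kernel, iterations=1)
    bg = cv2.erode((~fg_mask).astype(np.uint8), kernel, iterations=1)
    out = np.zeros(fg_mask.shape, dtype=np.float32)
    out[bg > 0] = -1.0
    out[fg > 0] = 1.0
    return out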
torch.arange(0, H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/data/colmap.py b/som_out/bear/code/2024-10-26-011247/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = {v.name: k for k, v in 
images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
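qvec2rotmat above expects COLMAP's (w, x, y, z) quaternion order; a quick sanity check is that the identity quaternion yields the identity matrix and that any output is orthonormal:

import numpy as np

R = qvec2rotmat(np.array([1.0, 0.0, 0.0, 0.0]))  # identity quaternion (w, x, y, z)
assert np.allclose(R, np.eye(3))
R = qvec2rotmat(np.array([np.cos(0.3), 0.0, 0.0, np.sin(0.3)]))  # rotation about z
assert np.allclose(R @ R.T, np.eye(3))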
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/data/iphone_dataset.py b/som_out/bear/code/2024-10-26-011247/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.scene_norm_dict = 
scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
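# The loader below clamps tiny values and then inverts, i.e. it assumes the
# cached .npy files hold disparity-like maps (an assumption suggested by the
# 1.0 / depth below, not stated in this file). A minimal commented sketch of
# the same arithmetic:
#
#   import numpy as np
#   disp = np.array([0.0, 0.5, 2.0], dtype=np.float32)
#   disp[disp < 1e-3] = 1e-3   # guard against division by ~0
#   depth = 1.0 / disp         # -> [1000.0, 2.0, 0.5]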
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
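# What follows maps every world point x to R @ (x - scene_center) and divides
# camera translations by `scale`, so the foreground sits near the origin at
# roughly unit size with +z up. R comes from an axis-angle vector
# axis * angle with axis = normalize(up x z) and angle = arccos(up . z).
# A minimal commented sketch of that construction (hypothetical up vector):
#
#   import torch, roma
#   import torch.nn.functional as F
#   up = F.normalize(torch.tensor([0.1, -0.9, 0.2]), dim=-1)
#   z = torch.tensor([0.0, 0.0, 1.0])
#   rotvec = F.normalize(up.cross(z, dim=-1), dim=-1) * up.dot(z).acos()
#   R = roma.rotvec_to_rotmat(rotvec)   # R @ up is (approximately) z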
subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly the same number of - samples from each frame. Note that this might yield fewer - samples than specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "get_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
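# The loop below spreads `num_samples` roughly evenly over the sampled frames,
# with the last frame absorbing the remainder: e.g. num_samples=100 over 8
# frames gives the first 7 frames floor(100 / 8) = 12 query tracks each and
# the last one 100 - 7 * 12 = 16, i.e. exactly 100 in total (fewer whenever a
# frame has fewer candidate tracks than its quota).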
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
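# F.grid_sample expects (x, y) coordinates in [-1, 1]; normalize_coords maps
# pixel (u, v) to (2u / (W - 1) - 1, 2v / (H - 1) - 1), so with
# align_corners=True pixel (0, 0) hits the top-left texel exactly. Sampling
# the (1, 3, H, W) query image at a (1, 1, N, 2) grid returns (1, 3, 1, N),
# hence the .squeeze().T below to obtain per-track RGB of shape (N, 3).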
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/data/utils.py b/som_out/bear/code/2024-10-26-011247/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. - - Args: - occlusions: [-inf, inf], np.float32 - expected_dist: [-inf, inf], np.float32 - - Returns: - visibles: bool - """ - - def sigmoid(x): - if isinstance(x, np.ndarray): - return 1 / (1 + np.exp(-x)) - else: - return torch.sigmoid(x) - - visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5 - return visibles - - -def parse_tapir_track_info(occlusions, expected_dist): - """ - return: - valid_visible: mask of visible & confident points - valid_invisible: mask of invisible & confident points - confidence: clamped confidence scores (all < 0.5 -> 0) - """ - visibility = 1 - F.sigmoid(occlusions) - confidence = 1 - F.sigmoid(expected_dist) - valid_visible = visibility * confidence > 0.5 - valid_invisible = (1 - visibility) * confidence > 0.5 - # set all confidence < 0.5 to 0 - confidence = confidence * (valid_visible | valid_invisible).float() - return valid_visible, valid_invisible, confidence - - -def get_tracks_3d_for_query_frame( - query_index: int, - query_img: torch.Tensor, - tracks_2d: torch.Tensor, - depths: torch.Tensor, - masks: torch.Tensor, - inv_Ks: torch.Tensor, - c2ws: torch.Tensor, -): - """ - :param query_index (int) - :param query_img [H, W, 3] - :param tracks_2d [N, T, 4] - :param depths [T, H, W] - :param masks [T, H, W] - :param inv_Ks [T, 3, 3] - :param c2ws [T, 4, 4] - returns ( - tracks_3d [N, T, 3] - track_colors [N, 3] - visibles [N, T] - invisibles [N, T] - confidences [N, T] - ) - """ - T, H, W = depths.shape - query_img = query_img[None].permute(0, 3, 1, 2) # (1, 3, H, W) - tracks_2d = tracks_2d.swapaxes(0, 1) # (T, N, 4) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - # (T, N), (T, N), (T, N) - visibles, 
invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - track_depths = F.grid_sample( - depths[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths[..., None] - ) - tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[ - ..., :3 - ] - # Filter out out-of-mask tracks. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - is_in_masks = ( - F.grid_sample( - masks[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - )[:, 0, 0] - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - - # valid if in the fg mask at least 40% of the time - # in_mask_counts = is_in_masks.sum(0) - # t = 0.25 - # thresh = min(t * T, in_mask_counts.float().quantile(t).item()) - # valid = in_mask_counts > thresh - valid = is_in_masks[query_index] - # valid if visible 5% of the time - visible_counts = visibles.sum(0) - valid = valid & ( - visible_counts - >= min( - int(0.05 * T), - visible_counts.float().quantile(0.1).item(), - ) - ) - - # Get track's color from the query frame. - # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. 
Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. - If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. 
-def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] - ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/init_utils.py b/som_out/bear/code/2024-10-26-011247/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from 
flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes -from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - -def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the canonical frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. - opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - -def init_bg( - points: StaticObservations, -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkdg_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkdg_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals. 
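# Each quaternion below rotates the local +z axis onto the point's normal:
# axis = normalize(z x n), angle = arccos(z . n), packed as an axis-angle
# vector. roma returns unit quaternions in (x, y, z, w) order, so the
# .roll(1, dims=-1) reorders them to the (w, x, y, z) convention this codebase
# uses for Gaussian quats (cf. the inverse .roll(-1, dims=-1) before
# roma.unitquat_to_rotmat in vis_se3_init_3d below).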
- local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t - 
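# Each per-point weight is the product of the two frames' Procrustes weights
# times the mean of their confidences; when the total weight of a
# (cano_t, cur_t) pair drops below min_mean_weight * num_frames, the solve is
# skipped and the previous frame's rotation/translation is reused.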
procrustes_weights = ( - weights[cano_t] - * weights[cur_t] - * (confidences[cano_t] + confidences[cur_t]) - / 2 - ) - if procrustes_weights.sum() < min_mean_weight * num_frames: - init_rots[n, cur_t] = init_rots[n, prev_t] - init_ts[n, cur_t] = init_ts[n, prev_t] - cluster_skip_ts.append(cur_t) - else: - se3, (err, err_before) = solve_procrustes( - cluster[cano_t], - cluster[cur_t], - weights=procrustes_weights, - enforce_se3=True, - rot_type=rot_type, - ) - init_rot, init_t, _ = se3 - assert init_rot.shape[-1] == rot_dim - # double cover - if rot_type == "quat" and torch.linalg.norm( - init_rot - init_rots[n][prev_t] - ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]): - init_rot = -init_rot - init_rots[n, cur_t] = init_rot - init_ts[n, cur_t] = init_t - if np.isnan(err): - print(f"{cur_t=} {err=}") - print(f"{procrustes_weights.isnan().sum()=}") - if np.isnan(err_before): - print(f"{cur_t=} {err_before=}") - print(f"{procrustes_weights.isnan().sum()=}") - errs_after[n, cur_t] = err - errs_before[n, cur_t] = err_before - prev_t = cur_t - skipped_ts[cluster_id.item()] = cluster_skip_ts - - guru.info(f"{skipped_ts=}") - guru.info( - "procrustes init median error: {:.5f} => {:.5f}".format( - np.median(errs_before[errs_before > 0]), - np.median(errs_after[errs_after > 0]), - ) - ) - guru.info( - "procrustes init mean error: {:.5f} => {:.5f}".format( - np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0]) - ) - ) - guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}") - - if vis: - server = get_server(port) - center_idcs = torch.argmin(dists2centers, dim=0) - print(f"{dists2centers.shape=} {center_idcs.shape=}") - vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs]) - vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks") - import ipdb - - ipdb.set_trace() - - bases = MotionBases(init_rots, init_ts) - return bases, motion_coefs, tracks_3d - - -def run_initial_optim( - fg: GaussianParams, - bases: MotionBases, - tracks_3d: TrackObservations, - Ks: torch.Tensor, - w2cs: torch.Tensor, - num_iters: int = 1000, - use_depth_range_loss: bool = False, -): - """ - :param motion_rots: [num_bases, num_frames, 4|6] - :param motion_transls: [num_bases, num_frames, 3] - :param motion_coefs: [num_bases, num_frames] - :param means: [num_gaussians, 3] - """ - optimizer = torch.optim.Adam( - [ - {"params": bases.params["rots"], "lr": 1e-2}, - {"params": bases.params["transls"], "lr": 3e-2}, - {"params": fg.params["motion_coefs"], "lr": 1e-2}, - {"params": fg.params["means"], "lr": 1e-3}, - ], - ) - scheduler = torch.optim.lr_scheduler.ExponentialLR( - optimizer, gamma=0.1 ** (1 / num_iters) - ) - G = fg.params.means.shape[0] - num_frames = bases.num_frames - device = bases.params["rots"].device - - w_smooth_func = lambda i, min_v, max_v, th: ( - min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v - ) - - gt_2d, gt_depth = project_2d_tracks( - tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - # (G, T, 2) - gt_2d = gt_2d.swapaxes(0, 1) - # (G, T) - gt_depth = gt_depth.swapaxes(0, 1) - - ts = torch.arange(0, num_frames, device=device) - ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1)) # (3B,) - - pbar = tqdm(range(0, num_iters)) - for i in pbar: - coefs = fg.get_coefs() - transfms = bases.compute_transforms(ts, coefs) - positions = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 
1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. - w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = 
init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / 
(cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/loss_utils.py b/som_out/bear/code/2024-10-26-011247/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return 
torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - 
# acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/metrics.py b/som_out/bear/code/2024-10-26-011247/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/metrics.py +++ /dev/null @@ -1,313 +0,0 @@ -from typing import Literal - -import numpy as np -import torch -import torch.nn.functional as F -from torchmetrics.functional.image.lpips import _NoTrainLpips -from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure -from torchmetrics.metric import Metric -from torchmetrics.utilities import dim_zero_cat -from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE - - -def compute_psnr( - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, -) -> float: - """ - Args: - preds (torch.Tensor): (..., 3) predicted images in [0, 1]. - targets (torch.Tensor): (..., 3) target images in [0, 1]. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - - Returns: - psnr (float): Peak signal-to-noise ratio. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - return ( - -10.0 - * torch.log( - F.mse_loss( - preds * masks[..., None], - targets * masks[..., None], - reduction="sum", - ) - / masks.sum().clamp(min=1.0) - / 3.0 - ) - / np.log(10.0) - ).item() - - -def compute_pose_errors( - preds: torch.Tensor, targets: torch.Tensor -) -> tuple[float, float, float]: - """ - Args: - preds: (N, 4, 4) predicted camera poses. - targets: (N, 4, 4) target camera poses. - - Returns: - ate (float): Absolute trajectory error. - rpe_t (float): Relative pose error in translation. - rpe_r (float): Relative pose error in rotation (degree). - """ - # Compute ATE. - ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item() - # Compute RPE_t and RPE_r. - # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r. - # torch has numerical issues for acos when the value is close to 1.0, i.e. - # RPE_r is supposed to be very small, and will result in artificially large - # error.
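- # NOTE: the relative pose errors below compare consecutive frame-to-frame motions of pred vs. target.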
- preds = preds.detach().cpu().numpy() - targets = targets.detach().cpu().numpy() - pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:] - target_rels = np.linalg.inv(targets[:-1]) @ targets[1:] - error_rels = np.linalg.inv(target_rels) @ pred_rels - traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1) - rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item() - rpe_r = ( - np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item() - / np.pi - * 180.0 - ) - return ate, rpe_t, rpe_r - - -class mPSNR(PeakSignalNoiseRatio): - sum_squared_error: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__(self, **kwargs) -> None: - super().__init__( - data_range=1.0, - base=10.0, - dim=None, - reduction="elementwise_mean", - **kwargs, - ) - self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (..., 3) float32 predicted images. - targets (torch.Tensor): (..., 3) float32 target images. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - self.sum_squared_error.append( - torch.sum(torch.pow((preds - targets) * masks[..., None], 2)) - ) - self.total.append(masks.sum().to(torch.int64) * 3) - - def compute(self) -> torch.Tensor: - """Compute peak signal-to-noise ratio over state.""" - sum_squared_error = dim_zero_cat(self.sum_squared_error) - total = dim_zero_cat(self.total) - return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0) - - -class mSSIM(StructuralSimilarityIndexMeasure): - similarity: list - - def __init__(self, **kwargs) -> None: - super().__init__( - reduction=None, - data_range=1.0, - return_full_image=False, - **kwargs, - ) - assert isinstance(self.sigma, float) - - def __len__(self) -> int: - return sum([s.shape[0] for s in self.similarity]) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional binary masks where - the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - - # Construct a 1D Gaussian blur filter. - assert isinstance(self.kernel_size, int) - hw = self.kernel_size // 2 - shift = (2 * hw - self.kernel_size + 1) / 2 - assert isinstance(self.sigma, float) - f_i = ( - (torch.arange(self.kernel_size, device=preds.device) - hw + shift) - / self.sigma - ) ** 2 - filt = torch.exp(-0.5 * f_i) - filt /= torch.sum(filt) - - # Blur in x and y (faster than the 2D convolution). - def convolve2d(z, m, f): - # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf).
- z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. - # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. 
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. - """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/params.py b/som_out/bear/code/2024-10-26-011247/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if 
f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] 
== 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/renderer.py b/som_out/bear/code/2024-10-26-011247/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. - work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/scene_model.py b/som_out/bear/code/2024-10-26-011247/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def 
__init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = 
GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). - target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = 
colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. - self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/tensor_dataclass.py b/som_out/bear/code/2024-10-26-011247/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
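- # NOTE: plain dicts are rebuilt by recursing into their values; keys are kept as-is.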
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/trainer.py b/som_out/bear/code/2024-10-26-011247/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
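- # NOTE: gaussian means at every target timestep, projected later for the 2D track loss.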
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
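- # NOTE: mark the query pixels so the track losses are evaluated only where tracks originate.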
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
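- # NOTE: loss terms are converted to Python floats with .item() so the stats hold no graph references.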
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
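- # NOTE: returns False when no render has populated xys/radii (model not in training mode).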
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/trajectories.py b/som_out/bear/code/2024-10-26-011247/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1)
- y = torch.cross(z, x, dim=-1) # (3)
- avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center)
- avg_w2c = torch.linalg.inv(avg_c2w)
- return avg_w2c
-
-
-def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor:
- """Triangulate a set of rays to find a single lookat point.
-
- Args:
- origins (torch.Tensor): A (N, 3) array of ray origins.
- viewdirs (torch.Tensor): A (N, 3) array of ray view directions.
-
- Returns:
- torch.Tensor: A (3,) lookat point.
- """
-
- viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1)
- eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None]
- # Calculate projection matrix I - rr^T
- I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :])
- # Compute sum of projections
- sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3)
- # Solve for the intersection point using least squares
- lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0]
- # Check NaNs.
- assert not torch.any(torch.isnan(lookat))
- return lookat
-
-
-def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor):
- """
- Args:
- positions: (N, 3) tensor of camera positions
- lookat: (3,) tensor of lookat point
- up: (3,) tensor of up vector
-
- Returns:
- w2cs: (N, 4, 4) tensor of world-to-camera transforms
- """
- forward_vectors = F.normalize(lookat - positions, dim=-1)
- right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1)
- down_vectors = F.normalize(
- torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1
- )
- Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1)
- w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions))
- return w2cs
-
-
-def get_arc_w2cs(
- ref_w2c: torch.Tensor,
- lookat: torch.Tensor,
- up: torch.Tensor,
- num_frames: int,
- degree: float,
- **_,
-) -> torch.Tensor:
- ref_position = torch.linalg.inv(ref_w2c)[:3, 3]
- thetas = (
- torch.sin(
- torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[
- :-1
- ]
- )
- * (degree / 2.0)
- / 180.0
- * torch.pi
- )
- # Rotate the reference offset about the up axis, then re-center on the
- # lookat point (without the "+ lookat" the arc would orbit the origin).
- positions = (
- torch.einsum(
- "nij,j->ni",
- roma.rotvec_to_rotmat(thetas[:, None] * up[None]),
- ref_position - lookat,
- )
- + lookat
- )
- return get_lookat_w2cs(positions, lookat, up)
-
-
-def get_lemniscate_w2cs(
- ref_w2c: torch.Tensor,
- lookat: torch.Tensor,
- up: torch.Tensor,
- num_frames: int,
- degree: float,
- **_,
-) -> torch.Tensor:
- ref_c2w = torch.linalg.inv(ref_w2c)
- a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi)
- # Lemniscate curve in camera space. Starting at the origin.
- thetas = (
- torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1]
- + torch.pi / 2
- )
- positions = torch.stack(
- [
- a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2),
- a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2),
- torch.zeros(num_frames, device=ref_w2c.device),
- ],
- dim=-1,
- )
- # Transform to world space. 
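- # Pad positions to homogeneous coordinates and apply the 3x4 camera-to-world matrix.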
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/transforms.py b/som_out/bear/code/2024-10-26-011247/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
- 
- Returns:
- torch.Tensor: (..., 4, 4)
- """
- mat34 = torch.cat([R, t[..., None]], dim=-1)
- if s is None:
- bottom = (
- mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]])
- .reshape((1,) * (mat34.dim() - 2) + (1, 4))
- .expand(mat34.shape[:-2] + (1, 4))
- )
- else:
- bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0)
- mat4 = torch.cat([mat34, bottom], dim=-2)
- return mat4
-
-
-def rmat_to_cont_6d(matrix):
- """
- :param matrix (*, 3, 3)
- :returns 6d vector (*, 6)
- """
- return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1)
-
-
-def cont_6d_to_rmat(cont_6d):
- """
- :param 6d vector (*, 6)
- :returns matrix (*, 3, 3)
- """
- x1 = cont_6d[..., 0:3]
- y1 = cont_6d[..., 3:6]
-
- x = F.normalize(x1, dim=-1)
- y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1)
- z = torch.linalg.cross(x, y, dim=-1)
-
- return torch.stack([x, y, z], dim=-1)
-
-
-def solve_procrustes(
- src: torch.Tensor,
- dst: torch.Tensor,
- weights: torch.Tensor | None = None,
- enforce_se3: bool = False,
- rot_type: Literal["quat", "mat", "6d"] = "quat",
-):
- """
- Solve the Procrustes problem to align two point clouds, by solving the
- following problem:
-
- min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1.
-
- Args:
- src (torch.Tensor): (N, 3).
- dst (torch.Tensor): (N, 3).
- weights (torch.Tensor | None): (N,), optional weights for alignment.
- enforce_se3 (bool): Whether to enforce the transform to be SE(3),
- i.e. disallow scaling.
- rot_type (Literal): Parameterization of the returned rotation:
- "quat" (WXYZ quaternion), "mat", or "6d".
-
- Returns:
- sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]):
- rot (torch.Tensor): rotation in the requested parameterization
- (a (4,) WXYZ quaternion by default).
- t (torch.Tensor): (3,), translation component.
- s (torch.Tensor): (), scale component.
- errors (tuple[float, float]): average L2 distance after and before
- alignment.
- """
- # Compute weights.
- if weights is None:
- weights = src.new_ones(src.shape[0])
- weights = weights[:, None] / weights.sum()
- # Normalize point positions.
- src_mean = (src * weights).sum(dim=0)
- dst_mean = (dst * weights).sum(dim=0)
- src_cent = src - src_mean
- dst_cent = dst - dst_mean
- # Normalize point scales.
- if not enforce_se3:
- src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt()
- dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt()
- else:
- src_scale = dst_scale = src.new_tensor(1.0)
- src_scaled = src_cent / src_scale
- dst_scaled = dst_cent / dst_scale
- # Compute the matrix for the singular value decomposition (SVD).
- matrix = (weights * dst_scaled).T @ src_scaled
- U, _, Vh = torch.linalg.svd(matrix)
- # Special reflection case.
- S = torch.eye(3, device=src.device)
- if torch.det(U) * torch.det(Vh) < 0:
- S[2, 2] = -1
- R = U @ S @ Vh
- # Compute the transformation.
- if rot_type == "quat":
- rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1)
- elif rot_type == "6d":
- rot = rmat_to_cont_6d(R)
- else:
- rot = R
- s = dst_scale / src_scale
- t = dst_mean / s - src_mean @ R.T
- sim3 = rot, t, s
- # Debug: measure the alignment error before and after the fit. 
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/validator.py b/som_out/bear/code/2024-10-26-011247/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
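- # PCK counts a keypoint as correct if it lands within 5% of the longer image side.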
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
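- # Ground-truth frame and rendering are concatenated side by side (along width).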
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
- t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (3, 3). - img = batch["imgs"][0] - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "motion_coefs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/vis/__init__.py b/som_out/bear/code/2024-10-26-011247/flow3d/vis/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-011247/flow3d/vis/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011247/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-011247/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc deleted file mode 100644 index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011247/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-011247/flow3d/vis/__pycache__/render_panel.cpython-310.pyc deleted file mode 100644 index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011247/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-011247/flow3d/vis/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011247/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/bear/code/2024-10-26-011247/flow3d/vis/__pycache__/viewer.cpython-310.pyc deleted file mode 100644 index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011247/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/vis/playback_panel.py b/som_out/bear/code/2024-10-26-011247/flow3d/vis/playback_panel.py deleted file mode 100644 index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/vis/playback_panel.py +++ /dev/null @@ -1,68 +0,0 @@ -import threading -import time - -import viser - - -def add_gui_playback_group( - server: viser.ViserServer, - num_frames: int, - min_fps: float = 1.0, - max_fps: float = 60.0, - fps_step: float = 0.1, - initial_fps: float = 10.0, -): - 
gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/vis/render_panel.py b/som_out/bear/code/2024-10-26-011247/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
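- # The counter guarantees unique scene-node names even after keyframes are deleted.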
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
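- # Keyframe time defaults to 0; use the viewer's current timestep when a handle is provided.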
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
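- # One transform gizmo per keyframe; dragging it updates both the keyframe and the spline.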
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
Returns current camera pose + FOV if available."""
-
- if preview_frame_slider is None:
- return
- maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad(
- preview_frame_slider.value / get_max_frame_index()
- )
- if maybe_pose_and_fov_rad_and_time is None:
- remove_preview_camera()
- return
- pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
- render_tab_state.preview_fov = fov_rad
- render_tab_state.preview_aspect = camera_path.get_aspect()
- render_tab_state.preview_camera_type = camera_type.value
- if gui_timestep_handle is not None:
- gui_timestep_handle.value = int(time)
- return pose, fov_rad, time
-
- def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]:
- """Helper for creating the current frame # slider. This is removed and
- re-added anytime the `max` value changes."""
-
- with playback_folder:
- preview_frame_slider = server.gui.add_slider(
- "Preview frame",
- min=0,
- max=get_max_frame_index(),
- step=1,
- initial_value=0,
- # Place right after the pause button.
- order=preview_render_stop_button.order + 0.01,
- disabled=get_max_frame_index() == 1,
- )
- play_button.disabled = preview_frame_slider.disabled
- preview_render_button.disabled = preview_frame_slider.disabled
-
- @preview_frame_slider.on_update
- def _(_) -> None:
- nonlocal preview_camera_handle
- maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
- if maybe_pose_and_fov_rad_and_time is None:
- return
- pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-
- preview_camera_handle = server.scene.add_camera_frustum(
- "/preview_camera",
- fov=fov_rad,
- aspect=resolution.value[0] / resolution.value[1],
- scale=0.35,
- wxyz=pose.rotation().wxyz,
- position=pose.translation(),
- color=(10, 200, 30),
- )
- if render_tab_state.preview_render:
- for client in server.get_clients().values():
- client.camera.wxyz = pose.rotation().wxyz
- client.camera.position = pose.translation()
- if gui_timestep_handle is not None:
- gui_timestep_handle.value = int(time)
-
- return preview_frame_slider
-
- # Back up camera poses before previewing renders so they can be restored afterwards.
- camera_pose_backup_from_id: Dict[int, tuple] = {}
-
- @preview_render_button.on_click
- def _(_) -> None:
- render_tab_state.preview_render = True
- preview_render_button.visible = False
- preview_render_stop_button.visible = True
-
- maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
- if maybe_pose_and_fov_rad_and_time is None:
- remove_preview_camera()
- return
- pose, fov, time = maybe_pose_and_fov_rad_and_time
- del fov
-
- # Hide all scene nodes when we're previewing the render.
- # (Must be False here; passing True would leave everything visible.)
- server.scene.set_global_visibility(False)
-
- # Back up and then set camera poses.
- for client in server.get_clients().values():
- camera_pose_backup_from_id[client.client_id] = (
- client.camera.position,
- client.camera.look_at,
- client.camera.up_direction,
- )
- client.camera.wxyz = pose.rotation().wxyz
- client.camera.position = pose.translation()
- if gui_timestep_handle is not None:
- gui_timestep_handle.value = int(time)
-
- @preview_render_stop_button.on_click
- def _(_) -> None:
- render_tab_state.preview_render = False
- preview_render_button.visible = True
- preview_render_stop_button.visible = False
-
- # Revert camera poses. 
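- # Restore each client's camera from the backup taken when the preview started.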
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera() # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play).start() - - # Play the camera trajectory when the play button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # add button for loading existing path - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - camera_path_dir = datapath.parent - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # load the json file - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # apply the x rotation by 180 deg - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_Fov values will not be exactly matched. 
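- # Treat the keyframe FOV as overridden only if it differs from the default by more than 1e-3.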
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/vis/utils.py b/som_out/bear/code/2024-10-26-011247/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, occ, radius=radius) 
- out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
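-
-    Example (sketch; shapes follow from the formulas above, with the
-    default num_segments=10):
-        points = torch.zeros(2, 2)  # P = 2
-        verts, faces = generate_point_verts_faces(points, point_size=4)
-        assert verts.shape == (22, 2)  # P * (10 + 1)
-        assert faces.shape == (20, 3)  # P * 10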
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
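-
-    Example (sketch):
-        depth = torch.rand(48, 64, 1) * 5.0
-        colored = apply_depth_colormap(depth)  # (48, 64, 3), turbo-mapped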
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/bear/code/2024-10-26-011247/flow3d/vis/viewer.py b/som_out/bear/code/2024-10-26-011247/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/bear/code/2024-10-26-011247/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/bear/code/2024-10-26-011247/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/bear/code/2024-10-26-011247/scripts/evaluate_iphone.py b/som_out/bear/code/2024-10-26-011247/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011247/scripts/evaluate_iphone.py +++ /dev/null @@ 
-1,447 +0,0 @@ -import argparse -import json -import os.path as osp -from glob import glob -from itertools import product - -import cv2 -import imageio.v3 as iio -import numpy as np -import roma -import torch -from tqdm import tqdm - -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.metrics import mLPIPS, mPSNR, mSSIM -from flow3d.transforms import rt_to_mat4, solve_procrustes - -parser = argparse.ArgumentParser() -parser.add_argument( - "--data_dir", - type=str, - help="Path to the data directory that contains all the sequences.", -) -parser.add_argument( - "--result_dir", - type=str, - help="Path to the result directory that contains the results." - "for batch evaluation, result_dir should contain subdirectories for each sequence. (result_dir/seq_name/results)" - "for single sequence evaluation, result_dir should contain results directly (result_dir/results)", -) -parser.add_argument( - "--seq_names", - type=str, - nargs="+", - default=[ - "apple", - "backpack", - "block", - "creeper", - "handwavy", - "haru-sit", - "mochi-high-five", - "paper-windmill", - "pillow", - "spin", - "sriracha-tree", - "teddy", - ], - help="Sequence names to evaluate.", -) -args = parser.parse_args() - - -def load_data_dict(data_dir, train_names, val_names): - val_imgs = np.array( - [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names] - ) - val_covisibles = np.array( - [ - iio.imread( - osp.join( - data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png" - ) - ) - for name in tqdm(val_names, desc="Loading val covisibles") - ] - ) - train_depths = np.array( - [ - np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0] - for name in train_names - ] - ) - train_Ks, train_w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [name + ".png" for name in train_names], - ) - train_Ks = train_Ks[:, :3, :3] - scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item() - train_c2ws = np.linalg.inv(train_w2cs) - train_c2ws[:, :3, -1] *= scale - train_w2cs = np.linalg.inv(train_c2ws) - keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json"))) - keypoints_2d = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints_2d.append(json.load(f)) - keypoints_2d = np.array(keypoints_2d) - keypoints_2d[..., :2] *= 2.0 - time_ids = np.array( - [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths] - ) - time_pairs = np.array(list(product(time_ids, repeat=2))) - index_pairs = np.array(list(product(range(len(time_ids)), repeat=2))) - keypoints_3d = [] - for i, kps_2d in zip(time_ids, keypoints_2d): - K = train_Ks[i] - w2c = train_w2cs[i] - depth = train_depths[i] - is_kp_visible = kps_2d[:, 2] == 1 - is_depth_valid = ( - cv2.remap( - (depth != 0).astype(np.float32), - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - == 1 - ) - kp_depths = cv2.remap( - depth, # type: ignore - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - kps_3d = ( - np.einsum( - "ij,pj->pi", - np.linalg.inv(K), - np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1), - ) - * kp_depths[:, None] - ) - kps_3d = np.einsum( - "ij,pj->pi", - np.linalg.inv(w2c)[:3], - np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1), - ) - kps_3d = np.concatenate( - [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1 - ) - 
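-        # The block above lifts each visible 2D keypoint to 3D:
-        #   X_cam = depth * K^-1 @ [u, v, 1]^T,  X_world = c2w[:3] @ [X_cam; 1],
-        # and appends a validity flag (keypoint visible and depth valid).
-        # For example, with fx = fy = 500, cx = cy = 0 and depth 2.0, pixel
-        # (250, 0) lifts to X_cam = (1.0, 0.0, 2.0).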
kps_3d[kps_3d[:, -1] != 1] = 0.0
-        keypoints_3d.append(kps_3d)
-    keypoints_3d = np.array(keypoints_3d)
-    return {
-        "val_imgs": val_imgs,
-        "val_covisibles": val_covisibles,
-        "train_depths": train_depths,
-        "train_Ks": train_Ks,
-        "train_w2cs": train_w2cs,
-        "keypoints_2d": keypoints_2d,
-        "keypoints_3d": keypoints_3d,
-        "time_ids": time_ids,
-        "time_pairs": time_pairs,
-        "index_pairs": index_pairs,
-    }
-
-
-def load_result_dict(result_dir, val_names):
-    try:
-        pred_val_imgs = np.array(
-            [
-                iio.imread(osp.join(result_dir, "rgb", f"{name}.png"))
-                for name in val_names
-            ]
-        )
-    except Exception:
-        pred_val_imgs = None
-    try:
-        keypoints_dict = np.load(
-            osp.join(result_dir, "keypoints.npz"), allow_pickle=True
-        )
-        if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict:
-            keypoints_dict = keypoints_dict["arr_0"].item()
-        pred_keypoint_Ks = keypoints_dict["Ks"]
-        pred_keypoint_w2cs = keypoints_dict["w2cs"]
-        pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"]
-        pred_train_depths = keypoints_dict["pred_train_depths"]
-    except Exception:
-        print(
-            "No keypoints.npz found; make sure this is expected, i.e. the "
-            "method itself cannot produce keypoints."
-        )
-        keypoints_dict = {}
-        pred_keypoint_Ks = None
-        pred_keypoint_w2cs = None
-        pred_keypoints_3d = None
-        pred_train_depths = None
-
-    if "visibilities" in keypoints_dict:
-        pred_visibilities = keypoints_dict["visibilities"]
-    else:
-        pred_visibilities = None
-
-    return {
-        "pred_val_imgs": pred_val_imgs,
-        "pred_train_depths": pred_train_depths,
-        "pred_keypoint_Ks": pred_keypoint_Ks,
-        "pred_keypoint_w2cs": pred_keypoint_w2cs,
-        "pred_keypoints_3d": pred_keypoints_3d,
-        "pred_visibilities": pred_visibilities,
-    }
-
-
-def evaluate_3d_tracking(data_dict, result_dict):
-    train_Ks = data_dict["train_Ks"]
-    train_w2cs = data_dict["train_w2cs"]
-    keypoints_3d = data_dict["keypoints_3d"]
-    time_ids = data_dict["time_ids"]
-    time_pairs = data_dict["time_pairs"]
-    index_pairs = data_dict["index_pairs"]
-    pred_keypoint_Ks = result_dict["pred_keypoint_Ks"]
-    pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"]
-    pred_keypoints_3d = result_dict["pred_keypoints_3d"]
-    if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks):
-        print("Inconsistent camera intrinsics.")
-        print(train_Ks[time_ids][0], pred_keypoint_Ks[0])
-    keypoint_w2cs = train_w2cs[time_ids]
-    q, t, s = solve_procrustes(
-        torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to(
-            torch.float32
-        ),
-        torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32),
-    )[0]
-    R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1))
-    pred_keypoints_3d = np.einsum(
-        "ij,...j->...i",
-        rt_to_mat4(R, t, s).numpy().astype(np.float64),
-        np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1),
-    )
-    pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:]
-    # Compute 3D tracking metrics.
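-    # EPE is the mean Euclidean error over keypoints covisible in both frames
-    # of a pair, averaged over pairs; PCK@tau is the fraction of those errors
-    # below tau in world units (meters here, after the Procrustes alignment
-    # above maps predictions into the ground-truth metric frame).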
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
None].repeat(num_pts, axis=-1) - evaluation_points = one_hot_eye == 0 - for i in range(num_frames): - evaluation_points[i, :, ~visibilities[i]] = False - occ_acc = np.sum( - np.equal(pred_visibilities, target_visibilities) & evaluation_points - ) / np.sum(evaluation_points) - all_frac_within = [] - all_jaccard = [] - - for thresh in [4, 8, 16, 32, 64]: - within_dist = np.sum( - np.square(pred_points - target_points), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, target_visibilities) - count_correct = np.sum(is_correct & evaluation_points) - count_visible_points = np.sum(target_visibilities & evaluation_points) - frac_correct = count_correct / count_visible_points - all_frac_within.append(frac_correct) - - true_positives = np.sum(is_correct & pred_visibilities & evaluation_points) - gt_positives = np.sum(target_visibilities & evaluation_points) - false_positives = (~target_visibilities) & pred_visibilities - false_positives = false_positives | ((~within_dist) & pred_visibilities) - false_positives = np.sum(false_positives & evaluation_points) - jaccard = true_positives / (gt_positives + false_positives) - all_jaccard.append(jaccard) - AJ = np.mean(all_jaccard) - APCK = np.mean(all_frac_within) - - print(f"2D tracking AJ: {AJ:.4f}") - print(f"2D tracking avg PCK: {APCK:.4f}") - print(f"2D tracking occlusion accuracy: {occ_acc:.4f}") - print("-----------------------------") - return AJ, APCK, occ_acc - - -def evaluate_nv(data_dict, result_dict): - device = "cuda" - psnr_metric = mPSNR().to(device) - ssim_metric = mSSIM().to(device) - lpips_metric = mLPIPS().to(device) - - val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device) - val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device) - pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device) - - for i in range(len(val_imgs)): - val_img = val_imgs[i] / 255.0 - pred_val_img = pred_val_imgs[i] / 255.0 - val_covisible = val_covisibles[i] / 255.0 - psnr_metric.update(val_img, pred_val_img, val_covisible) - ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - mpsnr = psnr_metric.compute().item() - mssim = ssim_metric.compute().item() - mlpips = lpips_metric.compute().item() - print(f"NV mPSNR: {mpsnr:.4f}") - print(f"NV mSSIM: {mssim:.4f}") - print(f"NV mLPIPS: {mlpips:.4f}") - return mpsnr, mssim, mlpips - - -if __name__ == "__main__": - seq_names = args.seq_names - - epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], [] - AJ_all, APCK_all, occ_acc_all = [], [], [] - mpsnr_all, mssim_all, mlpips_all = [], [], [] - - for seq_name in seq_names: - print("=========================================") - print(f"Evaluating {seq_name}") - print("=========================================") - data_dir = osp.join(args.data_dir, seq_name) - if not osp.exists(data_dir): - data_dir = args.data_dir - if not osp.exists(data_dir): - raise ValueError(f"Data directory {data_dir} not found.") - result_dir = osp.join(args.result_dir, seq_name, "results/") - if not osp.exists(result_dir): - result_dir = osp.join(args.result_dir, "results/") - if not osp.exists(result_dir): - raise ValueError(f"Result directory {result_dir} not found.") - - with open(osp.join(data_dir, "splits/train.json")) as f: - train_names = json.load(f)["frame_names"] - with open(osp.join(data_dir, "splits/val.json")) as f: - val_names = json.load(f)["frame_names"] - - data_dict = load_data_dict(data_dir, train_names, 
val_names) - result_dict = load_result_dict(result_dir, val_names) - if result_dict["pred_keypoints_3d"] is not None: - epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict) - AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict) - epe_all.append(epe) - pck_3d_10cm_all.append(pck_3d_10cm) - pck_3d_5cm_all.append(pck_3d_5cm) - AJ_all.append(AJ) - APCK_all.append(APCK) - occ_acc_all.append(occ_acc) - if len(data_dict["val_imgs"]) > 0: - if result_dict["pred_val_imgs"] is None: - print("No NV results found.") - continue - mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict) - mpsnr_all.append(mpsnr) - mssim_all.append(mssim) - mlpips_all.append(mlpips) - - print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}") - print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}") - print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}") - print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}") - print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}") - print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}") - print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}") - print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}") - print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}") diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/__init__.py b/som_out/bear/code/2024-10-26-011902/flow3d/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/configs.cpython-310.pyc deleted file mode 100644 index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/init_utils.cpython-310.pyc deleted file mode 100644 index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/loss_utils.cpython-310.pyc deleted file mode 100644 index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/metrics.cpython-310.pyc deleted file mode 100644 index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ diff --git 
a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/params.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/configs.py b/som_out/bear/code/2024-10-26-011902/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig - motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float = 
0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/data/__init__.py b/som_out/bear/code/2024-10-26-011902/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 index 0e12be124c07bc40149b0b8bcafd1413e94f8fd0..0000000000000000000000000000000000000000 Binary files 
a/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/colmap.cpython-310.pyc
deleted file mode 100644
index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc
deleted file mode 100644
index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/utils.cpython-310.pyc
deleted file mode 100644
index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/data/base_dataset.py b/som_out/bear/code/2024-10-26-011902/flow3d/data/base_dataset.py
deleted file mode 100644
index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000
--- a/som_out/bear/code/2024-10-26-011902/flow3d/data/base_dataset.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from abc import abstractmethod
-
-import torch
-from torch.utils.data import Dataset, default_collate
-
-
-class BaseDataset(Dataset):
-    @property
-    @abstractmethod
-    def num_frames(self) -> int: ...
-
-    @property
-    def keyframe_idcs(self) -> torch.Tensor:
-        return torch.arange(self.num_frames)
-
-    @abstractmethod
-    def get_w2cs(self) -> torch.Tensor: ...
-
-    @abstractmethod
-    def get_Ks(self) -> torch.Tensor: ...
-
-    @abstractmethod
-    def get_image(self, index: int) -> torch.Tensor: ...
-
-    @abstractmethod
-    def get_depth(self, index: int) -> torch.Tensor: ...
-
-    @abstractmethod
-    def get_mask(self, index: int) -> torch.Tensor: ...
-
-    @abstractmethod
-    def get_img_wh(self) -> tuple[int, int]: ...
-
-    @abstractmethod
-    def get_tracks_3d(
-        self, num_samples: int, **kwargs
-    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
-        """
-        Returns 3D tracks:
-            coordinates (N, T, 3),
-            visibles (N, T),
-            invisibles (N, T),
-            confidences (N, T),
-            colors (N, 3)
-        """
-        ...
-
-    @abstractmethod
-    def get_bkgd_points(
-        self, num_samples: int, **kwargs
-    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
-        """
-        Returns background points:
-            coordinates (N, 3),
-            normals (N, 3),
-            colors (N, 3)
-        """
-        ...
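-    # The collate function below batches fixed-size entries with
-    # default_collate but keeps the listed track/target keys as plain
-    # per-sample lists, since the number of 2D tracks can differ between
-    # samples and those tensors cannot be stacked.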
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/data/casual_dataset.py b/som_out/bear/code/2024-10-26-011902/flow3d/data/casual_dataset.py deleted file mode 100644 index e378c6d2408c8ae5d2cdaf45cf6f77dead5c7bd2..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything" #"aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything", #"aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = 
depth_type - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return self.depths[index] / 
self.scale - - def load_image(self, index) -> torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, dtype=torch.float32), - 
torch.arange(0, H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/data/colmap.py b/som_out/bear/code/2024-10-26-011902/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = {v.name: k for k, v in 
images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/data/iphone_dataset.py b/som_out/bear/code/2024-10-26-011902/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.scene_norm_dict = 
scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
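The load_depth defined next inverts the stored arrays after clamping, so for the non-lidar depth types the .npy files evidently hold disparity rather than metric depth, exactly as in CasualDataset.load_depth above. A standalone sketch of that conversion (function name hypothetical):

import numpy as np

def disparity_to_depth(disp: np.ndarray, eps: float = 1e-3) -> np.ndarray:
    # Clamp tiny disparities before inverting so invalid / far-away pixels
    # do not explode into near-infinite depths.
    return 1.0 / np.clip(disp, a_min=eps, a_max=None)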
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
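The block below computes the normalization: recenter on the mean of the subsampled foreground tracks, take half the largest 5%-95% quantile extent as the scene scale, and rotate the average camera up-vector onto +z. The resulting (scale, transfm) pair is consumed a bit further down by re-expressing cameras and depths in the normalized frame; a minimal sketch of that consumption step (shapes assumed: w2cs (N, 4, 4), depths (N, H, W)):

import torch

def apply_scene_norm(w2cs, depths, transfm, scale):
    # Compose each world-to-camera matrix with the inverse normalization
    # transform, then shrink translations and depths by the global scale.
    w2cs = w2cs @ torch.linalg.inv(transfm)
    w2cs[:, :3, 3] /= scale
    return w2cs, depths / scale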
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
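Each 2D track file holds an (N, 4) array per (query, target) frame pair: pixel x, y plus the two TAPIR logits (occlusion and expected distance) that parse_tapir_track_info later turns into visibility masks and confidences. A small loading sketch, mirroring load_target_tracks in casual_dataset.py above (helper name and flat path layout hypothetical):

import numpy as np
import torch

def load_track_pairs(tracks_dir: str, q_name: str, t_names: list[str]) -> torch.Tensor:
    # Stack the per-pair files "{q}_{t}.npy" into one (N, T, 4) tensor,
    # with the T target frames along dim 1.
    arrs = [
        np.load(f"{tracks_dir}/{q_name}_{t_name}.npy").astype(np.float32)
        for t_name in t_names
    ]
    return torch.from_numpy(np.stack(arrs, axis=1))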
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
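Per-track colors below are read with F.grid_sample, which samples a (B, C, H, W) image at coordinates normalized to [-1, 1] (see normalize_coords in data/utils.py further down). A self-contained version of that lookup for a single frame (function name hypothetical):

import torch
import torch.nn.functional as F

def sample_colors(img: torch.Tensor, xy: torch.Tensor) -> torch.Tensor:
    # img: (H, W, 3) float image; xy: (N, 2) pixel coordinates -> (N, 3) colors.
    H, W = img.shape[:2]
    grid = xy / xy.new_tensor([W - 1.0, H - 1.0]) * 2.0 - 1.0
    out = F.grid_sample(
        img.permute(2, 0, 1)[None],  # (1, 3, H, W)
        grid[None, None],            # (1, 1, N, 2)
        align_corners=True,
        padding_mode="border",
    )
    return out[0, :, 0].T            # (N, 3)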
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
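Note that train_dataloader in the commented-out module below passes iPhoneDataset.train_collate_fn as the collate_fn; as defined at the top of this section, it default-collates fixed-shape fields but keeps the ragged per-sample track fields as plain lists, since track counts differ across frames. A condensed sketch of the pattern (key set illustrative):

from torch.utils.data import default_collate

RAGGED_KEYS = {"query_tracks_2d", "target_tracks_2d"}

def train_collate(batch: list[dict]) -> dict:
    # Stack same-shape tensors along a new batch dim; leave variable-length
    # fields as Python lists so no padding is needed.
    return {
        k: [s[k] for s in batch]
        if k in RAGGED_KEYS
        else default_collate([s[k] for s in batch])
        for k in batch[0]
    }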
- self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/data/utils.py b/som_out/bear/code/2024-10-26-011902/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. - - Args: - occlusions: [-inf, inf], np.float32 - expected_dist:, [-inf, inf], np.float32 - - Returns: - visibles: bool - """ - - def sigmoid(x): - if x.dtype == np.ndarray: - return 1 / (1 + np.exp(-x)) - else: - return torch.sigmoid(x) - - visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5 - return visibles - - -def parse_tapir_track_info(occlusions, expected_dist): - """ - return: - valid_visible: mask of visible & confident points - valid_invisible: mask of invisible & confident points - confidence: clamped confidence scores (all < 0.5 -> 0) - """ - visiblility = 1 - F.sigmoid(occlusions) - confidence = 1 - F.sigmoid(expected_dist) - valid_visible = visiblility * confidence > 0.5 - valid_invisible = (1 - visiblility) * confidence > 0.5 - # set all confidence < 0.5 to 0 - confidence = confidence * (valid_visible | valid_invisible).float() - return valid_visible, valid_invisible, confidence - - -def get_tracks_3d_for_query_frame( - query_index: int, - query_img: torch.Tensor, - tracks_2d: torch.Tensor, - depths: torch.Tensor, - masks: torch.Tensor, - inv_Ks: torch.Tensor, - c2ws: torch.Tensor, -): - """ - :param query_index (int) - :param query_img [H, W, 3] - :param tracks_2d [N, T, 4] - :param depths [T, H, W] - :param masks [T, H, W] - :param inv_Ks [T, 3, 3] - :param c2ws [T, 4, 4] - returns ( - tracks_3d [N, T, 3] - track_colors [N, 3] - visibles [N, T] - invisibles [N, T] - confidences [N, T] - ) - """ - T, H, W = depths.shape - query_img = query_img[None].permute(0, 3, 1, 2) # (1, 3, H, W) - tracks_2d = tracks_2d.swapaxes(0, 1) # (T, N, 4) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - # (T, N), (T, N), (T, N) - visibles, 
invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - track_depths = F.grid_sample( - depths[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths[..., None] - ) - tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[ - ..., :3 - ] - # Filter out out-of-mask tracks. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - is_in_masks = ( - F.grid_sample( - masks[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - )[:, 0, 0] - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - - # valid if in the fg mask at least 40% of the time - # in_mask_counts = is_in_masks.sum(0) - # t = 0.25 - # thresh = min(t * T, in_mask_counts.float().quantile(t).item()) - # valid = in_mask_counts > thresh - valid = is_in_masks[query_index] - # valid if visible 5% of the time - visible_counts = visibles.sum(0) - valid = valid & ( - visible_counts - >= min( - int(0.05 * T), - visible_counts.float().quantile(0.1).item(), - ) - ) - - # Get track's color from the query frame. - # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. 
Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. - If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. 
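The GaussianShader helpers below recover per-pixel normals from a depth map: unproject every pixel to a world-space point via the inverse intrinsics/extrinsics, then take central differences and a cross product over the resulting point grid. A usage sketch, assuming the functions below are in scope (all values hypothetical):

import torch

H, W = 480, 854
depth = torch.rand(H, W) + 0.5                    # (H, W) metric depth
K = torch.tensor([[500.0, 0.0, W / 2.0],
                  [0.0, 500.0, H / 2.0],
                  [0.0, 0.0, 1.0]])               # (3, 3) intrinsics
w2c = torch.eye(4)                                # (4, 4) world-to-camera
normals = normal_from_depth_image(depth, K, w2c)  # (H, W, 3) unit vectors,
                                                  # zero-padded at the border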
-def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] - ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/init_utils.py b/som_out/bear/code/2024-10-26-011902/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from 
flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes
-from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks
-
-
-def init_fg_from_tracks_3d(
-    cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor
-) -> GaussianParams:
-    """
-    using dataclasses instead of individual tensors so we know they're consistent
-    and are always masked/filtered together
-    """
-    num_fg = tracks_3d.xyz.shape[0]
-
-    # Initialize gaussian colors.
-    colors = torch.logit(tracks_3d.colors)
-    # Initialize gaussian scales: find the average of the three nearest
-    # neighbors in the canonical frame for each point and use that as the
-    # scale.
-    dists, _ = knn(tracks_3d.xyz[:, cano_t], 3)
-    dists = torch.from_numpy(dists)
-    scales = dists.mean(dim=-1, keepdim=True)
-    scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95))
-    scales = torch.log(scales.repeat(1, 3))
-    # Initialize gaussian means.
-    means = tracks_3d.xyz[:, cano_t]
-    # Initialize gaussian orientations as random.
-    quats = torch.rand(num_fg, 4)
-    # Initialize gaussian opacities.
-    opacities = torch.logit(torch.full((num_fg,), 0.7))
-    gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs)
-    return gaussians
-
-
-def init_bg(
-    points: StaticObservations,
-) -> GaussianParams:
-    """
-    using dataclasses instead of individual tensors so we know they're consistent
-    and are always masked/filtered together
-    """
-    num_init_bg_gaussians = points.xyz.shape[0]
-    bg_scene_center = points.xyz.mean(0)
-    bg_points_centered = points.xyz - bg_scene_center
-    bg_min_scale = bg_points_centered.quantile(0.05, dim=0)
-    bg_max_scale = bg_points_centered.quantile(0.95, dim=0)
-    bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0
-    bkdg_colors = torch.logit(points.colors)
-
-    # Initialize gaussian scales: find the average of the three nearest
-    # neighbors for each point and use that as the scale.
-    dists, _ = knn(points.xyz, 3)
-    dists = torch.from_numpy(dists)
-    bg_scales = dists.mean(dim=-1, keepdim=True)
-    bkdg_scales = torch.log(bg_scales.repeat(1, 3))
-
-    bg_means = points.xyz
-
-    # Initialize gaussian orientations by normals.
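-    # The target rotation maps the local +z axis onto each point's normal:
-    # the rotation axis is z x n and the angle is arccos(z . n); roma returns
-    # xyzw quaternions, so roll(1, dims=-1) converts them to the wxyz
-    # convention used elsewhere in the codebase.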
- local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t - 
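-            # Per-point weights blend the Procrustes weights of the canonical
-            # and current frames with the mean of their confidences; if too
-            # little total weight survives, the previous frame's estimate is
-            # reused instead of solving.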
procrustes_weights = (
                weights[cano_t]
                * weights[cur_t]
                * (confidences[cano_t] + confidences[cur_t])
                / 2
            )
-            if procrustes_weights.sum() < min_mean_weight * num_frames:
-                init_rots[n, cur_t] = init_rots[n, prev_t]
-                init_ts[n, cur_t] = init_ts[n, prev_t]
-                cluster_skip_ts.append(cur_t)
-            else:
-                se3, (err, err_before) = solve_procrustes(
-                    cluster[cano_t],
-                    cluster[cur_t],
-                    weights=procrustes_weights,
-                    enforce_se3=True,
-                    rot_type=rot_type,
-                )
-                init_rot, init_t, _ = se3
-                assert init_rot.shape[-1] == rot_dim
-                # double cover: pick the quaternion sign closest to the
-                # previous frame's estimate to keep the sequence continuous
-                if rot_type == "quat" and torch.linalg.norm(
-                    init_rot - init_rots[n][prev_t]
-                ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]):
-                    init_rot = -init_rot
-                init_rots[n, cur_t] = init_rot
-                init_ts[n, cur_t] = init_t
-                if np.isnan(err):
-                    print(f"{cur_t=} {err=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                if np.isnan(err_before):
-                    print(f"{cur_t=} {err_before=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                errs_after[n, cur_t] = err
-                errs_before[n, cur_t] = err_before
-            prev_t = cur_t
-        skipped_ts[cluster_id.item()] = cluster_skip_ts
-
-    guru.info(f"{skipped_ts=}")
-    guru.info(
-        "procrustes init median error: {:.5f} => {:.5f}".format(
-            np.median(errs_before[errs_before > 0]),
-            np.median(errs_after[errs_after > 0]),
-        )
-    )
-    guru.info(
-        "procrustes init mean error: {:.5f} => {:.5f}".format(
-            np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0])
-        )
-    )
-    guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}")
-
-    if vis:
-        server = get_server(port)
-        center_idcs = torch.argmin(dists2centers, dim=0)
-        print(f"{dists2centers.shape=} {center_idcs.shape=}")
-        vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs])
-        vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks")
-        import ipdb
-
-        ipdb.set_trace()
-
-    bases = MotionBases(init_rots, init_ts)
-    return bases, motion_coefs, tracks_3d
-
-
-def run_initial_optim(
-    fg: GaussianParams,
-    bases: MotionBases,
-    tracks_3d: TrackObservations,
-    Ks: torch.Tensor,
-    w2cs: torch.Tensor,
-    num_iters: int = 1000,
-    use_depth_range_loss: bool = False,
-):
-    """
-    :param motion_rots: [num_bases, num_frames, 4|6]
-    :param motion_transls: [num_bases, num_frames, 3]
-    :param motion_coefs: [num_gaussians, num_bases]
-    :param means: [num_gaussians, 3]
-    """
-    optimizer = torch.optim.Adam(
-        [
-            {"params": bases.params["rots"], "lr": 1e-2},
-            {"params": bases.params["transls"], "lr": 3e-2},
-            {"params": fg.params["motion_coefs"], "lr": 1e-2},
-            {"params": fg.params["means"], "lr": 1e-3},
-        ],
-    )
-    scheduler = torch.optim.lr_scheduler.ExponentialLR(
-        optimizer, gamma=0.1 ** (1 / num_iters)
-    )
-    G = fg.params.means.shape[0]
-    num_frames = bases.num_frames
-    device = bases.params["rots"].device
-
-    w_smooth_func = lambda i, min_v, max_v, th: (
-        min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v
-    )
-
-    gt_2d, gt_depth = project_2d_tracks(
-        tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True
-    )
-    # (G, T, 2)
-    gt_2d = gt_2d.swapaxes(0, 1)
-    # (G, T)
-    gt_depth = gt_depth.swapaxes(0, 1)
-
-    ts = torch.arange(0, num_frames, device=device)
-    ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2)
-    ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1))  # (3 * num_frames,)
-
-    pbar = tqdm(range(0, num_iters))
-    for i in pbar:
-        coefs = fg.get_coefs()
-        transfms = bases.compute_transforms(ts, coefs)
-        positions = torch.einsum(
-            "pnij,pj->pni",
-            transfms,
-            F.pad(fg.params["means"], (0,
1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. - w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = 
init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / 
(cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/loss_utils.py b/som_out/bear/code/2024-10-26-011902/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return 
torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - 
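-    # Penalize frame-to-frame displacement along the viewing ray: for the
-    # neighbor triplet (t - 1, t, t + 1) stored along dim 1, both steps should
-    # have (near-)zero motion along the camera depth axis, which damps depth
-    # jitter without constraining lateral motion.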
# acc = 2 * means[:, 1] - means[:, 0] - means[:, 2]  # [G, B, 3]
-    # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean()
-    acc_loss = (
-        ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2
-    ).mean() + (
-        ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2
-    ).mean()
-    return acc_loss
-
-
-def compute_se3_smoothness_loss(
-    rots: torch.Tensor,
-    transls: torch.Tensor,
-    weight_rot: float = 1.0,
-    weight_transl: float = 2.0,
-):
-    """
-    central differences
-    :param rots (K, T, 6)
-    :param transls (K, T, 3)
-    """
-    r_accel_loss = compute_accel_loss(rots)
-    t_accel_loss = compute_accel_loss(transls)
-    return r_accel_loss * weight_rot + t_accel_loss * weight_transl
-
-
-def compute_accel_loss(transls):
-    accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:]
-    loss = accel.norm(dim=-1).mean()
-    return loss
-
diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/metrics.py b/som_out/bear/code/2024-10-26-011902/flow3d/metrics.py
deleted file mode 100644
index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000
--- a/som_out/bear/code/2024-10-26-011902/flow3d/metrics.py
+++ /dev/null
@@ -1,313 +0,0 @@
-from typing import Literal
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from torchmetrics.functional.image.lpips import _NoTrainLpips
-from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure
-from torchmetrics.metric import Metric
-from torchmetrics.utilities import dim_zero_cat
-from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE
-
-
-def compute_psnr(
-    preds: torch.Tensor,
-    targets: torch.Tensor,
-    masks: torch.Tensor | None = None,
-) -> float:
-    """
-    Args:
-        preds (torch.Tensor): (..., 3) predicted images in [0, 1].
-        targets (torch.Tensor): (..., 3) target images in [0, 1].
-        masks (torch.Tensor | None): (...,) optional binary masks where the
-            1-regions will be taken into account.
-
-    Returns:
-        psnr (float): Peak signal-to-noise ratio.
-    """
-    if masks is None:
-        masks = torch.ones_like(preds[..., 0])
-    return (
-        -10.0
-        * torch.log(
-            F.mse_loss(
-                preds * masks[..., None],
-                targets * masks[..., None],
-                reduction="sum",
-            )
-            / masks.sum().clamp(min=1.0)
-            / 3.0
-        )
-        / np.log(10.0)
-    ).item()
-
-
-def compute_pose_errors(
-    preds: torch.Tensor, targets: torch.Tensor
-) -> tuple[float, float, float]:
-    """
-    Args:
-        preds: (N, 4, 4) predicted camera poses.
-        targets: (N, 4, 4) target camera poses.
-
-    Returns:
-        ate (float): Absolute trajectory error.
-        rpe_t (float): Relative pose error in translation.
-        rpe_r (float): Relative pose error in rotation (degree).
-    """
-    # Compute ATE.
-    ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item()
-    # Compute RPE_t and RPE_r.
-    # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r.
-    # torch has numerical issues for acos when the value is close to 1.0, i.e.
-    # RPE_r is supposed to be very small, and will result in artificially large
-    # error.
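-    # Relative errors compare per-step relative transforms T_i = P_i^-1 P_{i+1}:
-    #   E_i = (T_i^gt)^-1 T_i^pred,
-    # rpe_t = mean ||translation(E_i)||, and
-    # rpe_r = mean arccos((trace(R_i) - 1) / 2), converted to degrees.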
-    preds = preds.detach().cpu().numpy()
-    targets = targets.detach().cpu().numpy()
-    pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:]
-    target_rels = np.linalg.inv(targets[:-1]) @ targets[1:]
-    error_rels = np.linalg.inv(target_rels) @ pred_rels
-    traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1)
-    rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item()
-    rpe_r = (
-        np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item()
-        / np.pi
-        * 180.0
-    )
-    return ate, rpe_t, rpe_r
-
-
-class mPSNR(PeakSignalNoiseRatio):
-    sum_squared_error: list[torch.Tensor]
-    total: list[torch.Tensor]
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            data_range=1.0,
-            base=10.0,
-            dim=None,
-            reduction="elementwise_mean",
-            **kwargs,
-        )
-        self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat")
-        self.add_state("total", default=[], dist_reduce_fx="cat")
-
-    def __len__(self) -> int:
-        return len(self.total)
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (..., 3) float32 predicted images.
-            targets (torch.Tensor): (..., 3) float32 target images.
-            masks (torch.Tensor | None): (...,) optional binary masks where the
-                1-regions will be taken into account.
-        """
-        if masks is None:
-            masks = torch.ones_like(preds[..., 0])
-        self.sum_squared_error.append(
-            torch.sum(torch.pow((preds - targets) * masks[..., None], 2))
-        )
-        self.total.append(masks.sum().to(torch.int64) * 3)
-
-    def compute(self) -> torch.Tensor:
-        """Compute peak signal-to-noise ratio over state."""
-        sum_squared_error = dim_zero_cat(self.sum_squared_error)
-        total = dim_zero_cat(self.total)
-        return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0)
-
-
-class mSSIM(StructuralSimilarityIndexMeasure):
-    similarity: list
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            reduction=None,
-            data_range=1.0,
-            return_full_image=False,
-            **kwargs,
-        )
-        assert isinstance(self.sigma, float)
-
-    def __len__(self) -> int:
-        return sum([s.shape[0] for s in self.similarity])
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (B, H, W, 3) float32 predicted images.
-            targets (torch.Tensor): (B, H, W, 3) float32 target images.
-            masks (torch.Tensor | None): (B, H, W) optional binary masks where
-                the 1-regions will be taken into account.
-        """
-        if masks is None:
-            masks = torch.ones_like(preds[..., 0])
-
-        # Construct a 1D Gaussian blur filter.
-        assert isinstance(self.kernel_size, int)
-        hw = self.kernel_size // 2
-        shift = (2 * hw - self.kernel_size + 1) / 2
-        assert isinstance(self.sigma, float)
-        f_i = (
-            (torch.arange(self.kernel_size, device=preds.device) - hw + shift)
-            / self.sigma
-        ) ** 2
-        filt = torch.exp(-0.5 * f_i)
-        filt /= torch.sum(filt)
-
-        # Blur in x and y (faster than the 2D convolution).
-        def convolve2d(z, m, f):
-            # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf).
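-            # Blur the masked signal and the mask separately, then renormalize
-            # so that masked-out pixels do not bias the local SSIM statistics.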
- z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. - # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. 
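-
-        A minimal usage sketch (the `metric` name is illustrative):
-
-            metric = mLPIPS()
-            metric.update(preds, targets, masks)  # once per rendered batch
-            score = metric.compute()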
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. - """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/params.py b/som_out/bear/code/2024-10-26-011902/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if 
f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] 
== 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/renderer.py b/som_out/bear/code/2024-10-26-011902/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. - work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/scene_model.py b/som_out/bear/code/2024-10-26-011902/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def 
__init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = 
GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). - target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = 
colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. - self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/tensor_dataclass.py b/som_out/bear/code/2024-10-26-011902/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
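-                # Plain dicts are rebuilt by mapping each value recursively.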
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/trainer.py b/som_out/bear/code/2024-10-26-011902/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
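-        # For reference, the total objective assembled above is (weights from
-        # LossesConfig; the tracks term only applies when w_smooth_tracks > 0):
-        #   loss = w_rgb * rgb_loss + w_mask * mask_loss + w_track * track_2d_loss
-        #        + w_depth_reg * depth_loss + w_depth_const * mapped_depth_loss
-        #        + w_depth_grad * depth_gradient_loss + w_smooth_bases * small_accel_loss
-        #        + w_smooth_tracks * small_accel_loss_tracks
-        #        + w_scale_var * var(scales) + w_z_accel * z_accel_loss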
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
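-        # Mirrors 3D Gaussian Splatting's adaptive density control: per rendered
-        # view we accumulate each visible gaussian's screen-space positional
-        # gradient norm and a visibility count (their ratio is the average
-        # gradient that triggers densification), plus the maximum observed
-        # screen radius used for splitting and culling decisions.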
-        if not (
-            self.model._current_radii is not None
-            and self.model._current_xys is not None
-        ):
-            guru.warning("Model not training, skipping control step preparation")
-            return False
-
-        batch_size = len(self._batched_xys)
-        # these quantities are for each rendered view and have shapes (C, G, *)
-        # must be aggregated over all views
-        for _current_xys, _current_radii, _current_img_wh in zip(
-            self._batched_xys, self._batched_radii, self._batched_img_wh
-        ):
-            sel = _current_radii > 0
-            gidcs = torch.where(sel)[1]
-            # normalize grads to [-1, 1] screen space
-            xys_grad = _current_xys.grad.clone()
-            xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size
-            xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size
-            self.running_stats["xys_grad_norm_acc"].index_add_(
-                0, gidcs, xys_grad[sel].norm(dim=-1)
-            )
-            self.running_stats["vis_count"].index_add_(
-                0, gidcs, torch.ones_like(gidcs, dtype=torch.int64)
-            )
-            max_radii = torch.maximum(
-                self.running_stats["max_radii"].index_select(0, gidcs),
-                _current_radii[sel] / max(_current_img_wh),
-            )
-            # In-place update; the out-of-place index_put would silently
-            # discard the result.
-            self.running_stats["max_radii"].index_put_((gidcs,), max_radii)
-        return True
-
-    @torch.no_grad()
-    def _densify_control_step(self, global_step):
-        assert (self.running_stats["vis_count"] > 0).any()
-
-        cfg = self.optim_cfg
-        xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[
-            "vis_count"
-        ].clamp_min(1)
-        is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold
-        # Split gaussians.
-        scales = self.model.get_scales_all()
-        is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold
-        if global_step < cfg.stop_control_by_screen_steps:
-            is_radius_too_big = (
-                self.running_stats["max_radii"] > cfg.densify_screen_threshold
-            )
-        else:
-            is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool)
-
-        should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big)
-        should_dup = is_grad_too_high & ~is_scale_too_big
-
-        num_fg = self.model.num_fg_gaussians
-        should_fg_split = should_split[:num_fg]
-        num_fg_splits = int(should_fg_split.sum().item())
-        should_fg_dup = should_dup[:num_fg]
-        num_fg_dups = int(should_fg_dup.sum().item())
-
-        should_bg_split = should_split[num_fg:]
-        num_bg_splits = int(should_bg_split.sum().item())
-        should_bg_dup = should_dup[num_fg:]
-        num_bg_dups = int(should_bg_dup.sum().item())
-
-        fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup)
-        for param_name, new_params in fg_param_map.items():
-            full_param_name = f"fg.params.{param_name}"
-            optimizer = self.optimizers[full_param_name]
-            dup_in_optim(
-                optimizer,
-                [new_params],
-                should_fg_split,
-                num_fg_splits * 2 + num_fg_dups,
-            )
-
-        if self.model.bg is not None:
-            bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup)
-            for param_name, new_params in bg_param_map.items():
-                full_param_name = f"bg.params.{param_name}"
-                optimizer = self.optimizers[full_param_name]
-                dup_in_optim(
-                    optimizer,
-                    [new_params],
-                    should_bg_split,
-                    num_bg_splits * 2 + num_bg_dups,
-                )
-
-        # update running stats
-        for k, v in self.running_stats.items():
-            v_fg, v_bg = v[:num_fg], v[num_fg:]
-            new_v = torch.cat(
-                [
-                    v_fg[~should_fg_split],
-                    v_fg[should_fg_dup],
-                    v_fg[should_fg_split].repeat(2),
-                    v_bg[~should_bg_split],
-                    v_bg[should_bg_dup],
-                    v_bg[should_bg_split].repeat(2),
-                ],
-                dim=0,
-            )
-            self.running_stats[k] = new_v
-        guru.info(
-            f"Split {should_split.sum().item()} gaussians, "
-            f"Duplicated {should_dup.sum().item()} gaussians, "
-            f"{self.model.num_gaussians} gaussians left"
-        )
-
-    @torch.no_grad()
-    def _cull_control_step(self, global_step):
-        # Cull gaussians.
-        cfg = self.optim_cfg
-        opacities = self.model.get_opacities_all()
-        device = opacities.device
-        is_opacity_too_small = opacities < cfg.cull_opacity_threshold
-        is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool)
-        is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool)
-        cull_scale_threshold = (
-            torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold
-        )
-        num_fg = self.model.num_fg_gaussians
-        cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale
-        if global_step > self.reset_opacity_every:
-            scales = self.model.get_scales_all()
-            is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold
-        if global_step < cfg.stop_control_by_screen_steps:
-            is_radius_too_big = (
-                self.running_stats["max_radii"] > cfg.cull_screen_threshold
-            )
-        should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big
-        should_fg_cull = should_cull[:num_fg]
-        should_bg_cull = should_cull[num_fg:]
-
-        fg_param_map = self.model.fg.cull_params(should_fg_cull)
-        for param_name, new_params in fg_param_map.items():
-            full_param_name = f"fg.params.{param_name}"
-            optimizer = self.optimizers[full_param_name]
-            remove_from_optim(optimizer, [new_params], should_fg_cull)
-
-        if self.model.bg is not None:
-            bg_param_map = self.model.bg.cull_params(should_bg_cull)
-            for param_name, new_params in bg_param_map.items():
-                full_param_name = f"bg.params.{param_name}"
-                optimizer = self.optimizers[full_param_name]
-                remove_from_optim(optimizer, [new_params], should_bg_cull)
-
-        # update running stats
-        for k, v in self.running_stats.items():
-            self.running_stats[k] = v[~should_cull]
-
-        guru.info(
-            f"Culled {should_cull.sum().item()} gaussians, "
-            f"{self.model.num_gaussians} gaussians left"
-        )
-
-    @torch.no_grad()
-    def _reset_opacity_control_step(self):
-        # Reset gaussian opacities.
-        new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold))
-        # Skip the bg part when the model has no background gaussians, matching
-        # the guards in the densify and cull steps.
-        parts = ["fg", "bg"] if self.model.bg is not None else ["fg"]
-        for part in parts:
-            part_params = getattr(self.model, part).reset_opacities(new_val)
-            # Modify optimizer states by new assignment.
-            for param_name, new_params in part_params.items():
-                full_param_name = f"{part}.params.{param_name}"
-                optimizer = self.optimizers[full_param_name]
-                reset_in_optim(optimizer, [new_params])
-        guru.info("Reset opacities")
-
-    def configure_optimizers(self):
-        def _exponential_decay(step, *, lr_init, lr_final):
-            t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0)
-            lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)
-            return lr / lr_init
-
-        lr_dict = asdict(self.lr_cfg)
-        optimizers = {}
-        schedulers = {}
-        # named parameters will be [part].params.[field]
-        # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/trajectories.py b/som_out/bear/code/2024-10-26-011902/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1)
-    y = torch.cross(z, x, dim=-1)  # (3)
-    avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center)
-    avg_w2c = torch.linalg.inv(avg_c2w)
-    return avg_w2c
-
-
-def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor:
-    """Triangulate a set of rays to find a single lookat point.
-
-    Args:
-        origins (torch.Tensor): A (N, 3) array of ray origins.
-        viewdirs (torch.Tensor): A (N, 3) array of ray view directions.
-
-    Returns:
-        torch.Tensor: A (3,) lookat point.
-    """
-
-    viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1)
-    eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None]
-    # Calculate projection matrix I - rr^T
-    I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :])
-    # Compute sum of projections
-    sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3)
-    # Solve for the intersection point using least squares
-    lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0]
-    # Check NaNs.
-    assert not torch.any(torch.isnan(lookat))
-    return lookat
-
-
-def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor):
-    """
-    Args:
-        positions: (N, 3) tensor of camera positions
-        lookat: (3,) tensor of lookat point
-        up: (3,) tensor of up vector
-
-    Returns:
-        w2cs: (N, 4, 4) tensor of world-to-camera matrices
-    """
-    forward_vectors = F.normalize(lookat - positions, dim=-1)
-    right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1)
-    down_vectors = F.normalize(
-        torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1
-    )
-    Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1)
-    w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions))
-    return w2cs
-
-
-def get_arc_w2cs(
-    ref_w2c: torch.Tensor,
-    lookat: torch.Tensor,
-    up: torch.Tensor,
-    num_frames: int,
-    degree: float,
-    **_,
-) -> torch.Tensor:
-    ref_position = torch.linalg.inv(ref_w2c)[:3, 3]
-    thetas = (
-        torch.sin(
-            torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[
-                :-1
-            ]
-        )
-        * (degree / 2.0)
-        / 180.0
-        * torch.pi
-    )
-    positions = torch.einsum(
-        "nij,j->ni",
-        roma.rotvec_to_rotmat(thetas[:, None] * up[None]),
-        ref_position - lookat,
-    )
-    return get_lookat_w2cs(positions, lookat, up)
-
-
-def get_lemniscate_w2cs(
-    ref_w2c: torch.Tensor,
-    lookat: torch.Tensor,
-    up: torch.Tensor,
-    num_frames: int,
-    degree: float,
-    **_,
-) -> torch.Tensor:
-    ref_c2w = torch.linalg.inv(ref_w2c)
-    a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi)
-    # Lemniscate curve in camera space. Starting at the origin.
-    thetas = (
-        torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1]
-        + torch.pi / 2
-    )
-    positions = torch.stack(
-        [
-            a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2),
-            a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2),
-            torch.zeros(num_frames, device=ref_w2c.device),
-        ],
-        dim=-1,
-    )
-    # Transform to world space.
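-    # i.e. p_world = ref_c2w[:3, :3] @ p_cam + ref_c2w[:3, 3]; padding p_cam
-    # with a homogeneous 1 lets a single einsum with ref_c2w[:3] do both.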
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/transforms.py b/som_out/bear/code/2024-10-26-011902/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
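-            Optional uniform scale factor; stored as 1/s in the bottom row so
-            that dehomogenizing the result applies s * (R @ x + t).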
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transfm to be SE3. - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - q (torch.Tensor): (4,), rotation component in quaternion of WXYZ - format. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (torch.Tensor): (), average L2 distance after alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error. 
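-    # Sanity check: apply the recovered sim(3) to src and report the weighted
-    # mean L2 distance to dst, before vs. after alignment.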
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/validator.py b/som_out/bear/code/2024-10-26-011902/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
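-            # PCK counts a predicted keypoint as correct when it falls within a
-            # threshold of the target; here the threshold is 5% of the longer
-            # image side (e.g. roughly 42.7 px for an 854x480 frame).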
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
-            t = batch["ts"][0]
-            # (4, 4).
-            w2c = batch["w2cs"][0]
-            # (3, 3).
-            K = batch["Ks"][0]
-            # (H, W, 3).
-            img = batch["imgs"][0]
-            img_wh = img.shape[-2::-1]
-            rendered = self.model.render(
-                t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors
-            )
-            # Putting results onto CPU since it will consume unnecessarily
-            # large GPU memory for long sequence OW.
-            video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu())
-        video = torch.stack(video, dim=0)
-        iio.mimwrite(
-            osp.join(video_dir, "motion_coefs.mp4"),
-            make_video_divisble((video.numpy() * 255).astype(np.uint8)),
-            fps=fps,
-        )
diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/vis/__init__.py b/som_out/bear/code/2024-10-26-011902/flow3d/vis/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/vis/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc
deleted file mode 100644
index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/vis/__pycache__/render_panel.cpython-310.pyc
deleted file mode 100644
index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/vis/__pycache__/utils.cpython-310.pyc
deleted file mode 100644
index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/bear/code/2024-10-26-011902/flow3d/vis/__pycache__/viewer.cpython-310.pyc
deleted file mode 100644
index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000
Binary files a/som_out/bear/code/2024-10-26-011902/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ
diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/vis/playback_panel.py b/som_out/bear/code/2024-10-26-011902/flow3d/vis/playback_panel.py
deleted file mode 100644
index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000
--- a/som_out/bear/code/2024-10-26-011902/flow3d/vis/playback_panel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import threading
-import time
-
-import viser
-
-
-def add_gui_playback_group(
-    server: viser.ViserServer,
-    num_frames: int,
-    min_fps: float = 1.0,
-    max_fps: float = 60.0,
-    fps_step: float = 0.1,
-    initial_fps: float = 10.0,
-):
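-    # Builds the playback controls (timestep slider, prev/next frame buttons,
-    # pause/resume toggle, FPS slider) and wires up their callbacks below;
-    # returns the GUI handles so callers can read or drive playback state.
-    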
gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/vis/render_panel.py b/som_out/bear/code/2024-10-26-011902/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
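-        # (Replacement happens when the GUI below re-adds a keyframe under its
-        # existing index, e.g. after toggling an FOV override.)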
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
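-                    # Without the atomic block a frame could be rendered between
-                    # the two assignments, showing a half-updated camera pose.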
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
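update_spline fits Kochanek-Bartels splines through the keyframes; of the (tension, continuity, bias) triple only tension is exposed in the GUI, and endconditions switches between a closed loop and a natural open end. A minimal sketch using the `splines` package with made-up keyframe positions:

```python
import numpy as np
import splines

# Made-up keyframe positions, one per keyframe, in world coordinates.
positions = [np.array(p, dtype=float) for p in [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 1)]]

# tcb = (tension, continuity, bias); the render tab only exposes tension.
position_spline = splines.KochanekBartels(
    positions, tcb=(0.5, 0.0, 0.0), endconditions="natural"
)

# The spline parameter advances by one per keyframe, matching spline_t_from_t_sec.
samples = position_spline.evaluate(np.linspace(0.0, len(positions) - 1.0, 20))
print(samples.shape)  # (20, 3)
```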
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
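Keyframe.from_camera below packs the connected client's camera orientation (a wxyz quaternion) and position into an SE(3) pose. A minimal sketch of that conversion, assuming a running viser server; the on_client_connect callback here merely stands in for the GUI button handler:

```python
import viser
import viser.transforms as tf

server = viser.ViserServer()

@server.on_client_connect
def _(client: viser.ClientHandle) -> None:
    # Orientation is a wxyz quaternion; position is in world coordinates.
    pose = tf.SE3.from_rotation_and_translation(
        tf.SO3(client.camera.wxyz), client.camera.position
    )
    # 4x4 camera-to-world matrix, e.g. for serializing a keyframe to JSON.
    print(pose.as_matrix())
```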
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
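_make_transform_controls_callback above exists to dodge Python's late-binding closures: had the on_update handler been defined inline in the keyframe loop below, every handler would capture the loop's final keyframe. A factory function freezes each iteration's values. A minimal illustration (the make_callbacks_* names are hypothetical):

```python
def make_callbacks_buggy(values):
    # Late binding: every lambda closes over the same variable v,
    # so all of them see its final value.
    return [lambda: v for v in values]

def make_callbacks_fixed(values):
    def make_one(v):  # the factory freezes v per iteration
        return lambda: v
    return [make_one(v) for v in values]

print([f() for f in make_callbacks_buggy([1, 2, 3])])  # [3, 3, 3]
print([f() for f in make_callbacks_fixed([1, 2, 3])])  # [1, 2, 3]
```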
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
-        Returns current camera pose + FOV if available."""
-
-        if preview_frame_slider is None:
-            return
-        maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad(
-            preview_frame_slider.value / get_max_frame_index()
-        )
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-        render_tab_state.preview_fov = fov_rad
-        render_tab_state.preview_aspect = camera_path.get_aspect()
-        render_tab_state.preview_camera_type = camera_type.value
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-        return pose, fov_rad, time
-
-    def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]:
-        """Helper for creating the current frame # slider. This is removed and
-        re-added anytime the `max` value changes."""
-
-        with playback_folder:
-            preview_frame_slider = server.gui.add_slider(
-                "Preview frame",
-                min=0,
-                max=get_max_frame_index(),
-                step=1,
-                initial_value=0,
-                # Place right after the pause button.
-                order=preview_render_stop_button.order + 0.01,
-                disabled=get_max_frame_index() == 1,
-            )
-            play_button.disabled = preview_frame_slider.disabled
-            preview_render_button.disabled = preview_frame_slider.disabled
-
-        @preview_frame_slider.on_update
-        def _(_) -> None:
-            nonlocal preview_camera_handle
-            maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-            if maybe_pose_and_fov_rad_and_time is None:
-                return
-            pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-
-            preview_camera_handle = server.scene.add_camera_frustum(
-                "/preview_camera",
-                fov=fov_rad,
-                aspect=resolution.value[0] / resolution.value[1],
-                scale=0.35,
-                wxyz=pose.rotation().wxyz,
-                position=pose.translation(),
-                color=(10, 200, 30),
-            )
-            if render_tab_state.preview_render:
-                for client in server.get_clients().values():
-                    client.camera.wxyz = pose.rotation().wxyz
-                    client.camera.position = pose.translation()
-                if gui_timestep_handle is not None:
-                    gui_timestep_handle.value = int(time)
-
-        return preview_frame_slider
-
-    # We back up the camera poses before and after we start previewing renders.
-    camera_pose_backup_from_id: Dict[int, tuple] = {}
-
-    @preview_render_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = True
-        preview_render_button.visible = False
-        preview_render_stop_button.visible = True
-
-        maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov, time = maybe_pose_and_fov_rad_and_time
-        del fov
-
-        # Hide all scene nodes when we're previewing the render.
-        server.scene.set_global_visibility(False)
-
-        # Back up and then set camera poses.
-        for client in server.get_clients().values():
-            camera_pose_backup_from_id[client.client_id] = (
-                client.camera.position,
-                client.camera.look_at,
-                client.camera.up_direction,
-            )
-            client.camera.wxyz = pose.rotation().wxyz
-            client.camera.position = pose.translation()
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-
-    @preview_render_stop_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = False
-        preview_render_button.visible = True
-        preview_render_stop_button.visible = False
-
-        # Revert camera poses.
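Entering and exiting the preview brackets each client's camera with a backup/restore pair keyed by client id; pop() ensures each backup is restored at most once. A minimal sketch of the pattern, where CameraBackup and the camera argument are stand-ins for viser's client.camera handle:

```python
from typing import Dict, NamedTuple

import numpy as np

class CameraBackup(NamedTuple):
    position: np.ndarray
    look_at: np.ndarray
    up_direction: np.ndarray

backups: Dict[int, CameraBackup] = {}

def enter_preview(client_id: int, camera) -> None:
    # camera stands in for viser's client.camera handle (assumption).
    backups[client_id] = CameraBackup(camera.position, camera.look_at, camera.up_direction)

def exit_preview(client_id: int, camera) -> None:
    if client_id in backups:
        saved = backups.pop(client_id)  # pop() so each backup restores at most once
        camera.position = saved.position
        camera.look_at = saved.look_at
        camera.up_direction = saved.up_direction
```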
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera() # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play).start() - - # Play the camera trajectory when the play button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # add button for loading existing path - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - camera_path_dir = datapath.parent - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # load the json file - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # apply the x rotation by 180 deg - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_Fov values will not be exactly matched. 
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/vis/utils.py b/som_out/bear/code/2024-10-26-011902/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, occ, radius=radius) 
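project_2d_tracks above is a batched pinhole projection: world points are mapped through the world-to-camera transform, then through the intrinsics K, then perspective-divided with the depth clamped away from zero. A single-frame numpy sketch, with a made-up camera whose focal length is derived from a vertical FOV the same way as get_intrinsics in the render panel above:

```python
import numpy as np

def project_points(K: np.ndarray, T_cw: np.ndarray, pts_w: np.ndarray) -> np.ndarray:
    """Project (N, 3) world points with 3x3 intrinsics K and 4x4 world-to-camera T_cw."""
    pts_h = np.concatenate([pts_w, np.ones_like(pts_w[:, :1])], axis=-1)  # (N, 4)
    pts_c = (T_cw @ pts_h.T).T[:, :3]  # camera-frame coordinates
    pts_v = (K @ pts_c.T).T  # pixel coordinates scaled by depth
    return pts_v[:, :2] / np.clip(pts_v[:, 2:], 1e-5, None)  # perspective divide

# Made-up 640x480 camera; focal from a 60 degree vertical FOV.
H, W = 480, 640
focal = 0.5 * H / np.tan(0.5 * np.deg2rad(60.0))
K = np.array([[focal, 0.0, W / 2], [0.0, focal, H / 2], [0.0, 0.0, 1.0]])
print(project_points(K, np.eye(4), np.array([[0.0, 0.0, 2.0], [0.5, -0.2, 3.0]])))
```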
- out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/bear/code/2024-10-26-011902/flow3d/vis/viewer.py b/som_out/bear/code/2024-10-26-011902/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/bear/code/2024-10-26-011902/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/bear/code/2024-10-26-011902/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/bear/code/2024-10-26-011902/scripts/evaluate_iphone.py b/som_out/bear/code/2024-10-26-011902/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- a/som_out/bear/code/2024-10-26-011902/scripts/evaluate_iphone.py +++ /dev/null @@ 
-1,447 +0,0 @@ -import argparse -import json -import os.path as osp -from glob import glob -from itertools import product - -import cv2 -import imageio.v3 as iio -import numpy as np -import roma -import torch -from tqdm import tqdm - -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.metrics import mLPIPS, mPSNR, mSSIM -from flow3d.transforms import rt_to_mat4, solve_procrustes - -parser = argparse.ArgumentParser() -parser.add_argument( - "--data_dir", - type=str, - help="Path to the data directory that contains all the sequences.", -) -parser.add_argument( - "--result_dir", - type=str, - help="Path to the result directory that contains the results." - "for batch evaluation, result_dir should contain subdirectories for each sequence. (result_dir/seq_name/results)" - "for single sequence evaluation, result_dir should contain results directly (result_dir/results)", -) -parser.add_argument( - "--seq_names", - type=str, - nargs="+", - default=[ - "apple", - "backpack", - "block", - "creeper", - "handwavy", - "haru-sit", - "mochi-high-five", - "paper-windmill", - "pillow", - "spin", - "sriracha-tree", - "teddy", - ], - help="Sequence names to evaluate.", -) -args = parser.parse_args() - - -def load_data_dict(data_dir, train_names, val_names): - val_imgs = np.array( - [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names] - ) - val_covisibles = np.array( - [ - iio.imread( - osp.join( - data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png" - ) - ) - for name in tqdm(val_names, desc="Loading val covisibles") - ] - ) - train_depths = np.array( - [ - np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0] - for name in train_names - ] - ) - train_Ks, train_w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [name + ".png" for name in train_names], - ) - train_Ks = train_Ks[:, :3, :3] - scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item() - train_c2ws = np.linalg.inv(train_w2cs) - train_c2ws[:, :3, -1] *= scale - train_w2cs = np.linalg.inv(train_c2ws) - keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json"))) - keypoints_2d = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints_2d.append(json.load(f)) - keypoints_2d = np.array(keypoints_2d) - keypoints_2d[..., :2] *= 2.0 - time_ids = np.array( - [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths] - ) - time_pairs = np.array(list(product(time_ids, repeat=2))) - index_pairs = np.array(list(product(range(len(time_ids)), repeat=2))) - keypoints_3d = [] - for i, kps_2d in zip(time_ids, keypoints_2d): - K = train_Ks[i] - w2c = train_w2cs[i] - depth = train_depths[i] - is_kp_visible = kps_2d[:, 2] == 1 - is_depth_valid = ( - cv2.remap( - (depth != 0).astype(np.float32), - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - == 1 - ) - kp_depths = cv2.remap( - depth, # type: ignore - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - kps_3d = ( - np.einsum( - "ij,pj->pi", - np.linalg.inv(K), - np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1), - ) - * kp_depths[:, None] - ) - kps_3d = np.einsum( - "ij,pj->pi", - np.linalg.inv(w2c)[:3], - np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1), - ) - kps_3d = np.concatenate( - [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1 - ) - 
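The loop above lifts each 2D keypoint to 3D by unprojecting through the inverse intrinsics, scaling the resulting ray by the depth sampled at the keypoint, and mapping into world coordinates with the inverse extrinsics. A minimal sketch of the same math; lift_keypoints and the example camera are made up:

```python
import numpy as np

def lift_keypoints(K: np.ndarray, w2c: np.ndarray, kps_uv: np.ndarray, depths: np.ndarray) -> np.ndarray:
    """Back-project (N, 2) pixel keypoints with per-point depths into world space."""
    uv1 = np.pad(kps_uv, ((0, 0), (0, 1)), constant_values=1)  # homogeneous pixels (N, 3)
    rays_c = np.einsum("ij,pj->pi", np.linalg.inv(K), uv1)  # camera-frame rays at depth 1
    pts_c = rays_c * depths[:, None]  # scale by metric depth
    pts_c1 = np.pad(pts_c, ((0, 0), (0, 1)), constant_values=1)  # (N, 4)
    return np.einsum("ij,pj->pi", np.linalg.inv(w2c)[:3], pts_c1)  # world coordinates

# Made-up camera: a keypoint at the principal point with depth 2 lands on the z-axis.
K = np.array([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
print(lift_keypoints(K, np.eye(4), np.array([[320.0, 240.0]]), np.array([2.0])))
```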
        kps_3d[kps_3d[:, -1] != 1] = 0.0
-        keypoints_3d.append(kps_3d)
-    keypoints_3d = np.array(keypoints_3d)
-    return {
-        "val_imgs": val_imgs,
-        "val_covisibles": val_covisibles,
-        "train_depths": train_depths,
-        "train_Ks": train_Ks,
-        "train_w2cs": train_w2cs,
-        "keypoints_2d": keypoints_2d,
-        "keypoints_3d": keypoints_3d,
-        "time_ids": time_ids,
-        "time_pairs": time_pairs,
-        "index_pairs": index_pairs,
-    }
-
-
-def load_result_dict(result_dir, val_names):
-    try:
-        pred_val_imgs = np.array(
-            [
-                iio.imread(osp.join(result_dir, "rgb", f"{name}.png"))
-                for name in val_names
-            ]
-        )
-    except Exception:
-        pred_val_imgs = None
-    try:
-        keypoints_dict = np.load(
-            osp.join(result_dir, "keypoints.npz"), allow_pickle=True
-        )
-        if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict:
-            keypoints_dict = keypoints_dict["arr_0"].item()
-        pred_keypoint_Ks = keypoints_dict["Ks"]
-        pred_keypoint_w2cs = keypoints_dict["w2cs"]
-        pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"]
-        pred_train_depths = keypoints_dict["pred_train_depths"]
-    except Exception:
-        print(
-            "No keypoints.npz found; make sure this is because the method itself "
-            "cannot produce keypoints."
-        )
-        keypoints_dict = {}
-        pred_keypoint_Ks = None
-        pred_keypoint_w2cs = None
-        pred_keypoints_3d = None
-        pred_train_depths = None
-
-    if "visibilities" in keypoints_dict:
-        pred_visibilities = keypoints_dict["visibilities"]
-    else:
-        pred_visibilities = None
-
-    return {
-        "pred_val_imgs": pred_val_imgs,
-        "pred_train_depths": pred_train_depths,
-        "pred_keypoint_Ks": pred_keypoint_Ks,
-        "pred_keypoint_w2cs": pred_keypoint_w2cs,
-        "pred_keypoints_3d": pred_keypoints_3d,
-        "pred_visibilities": pred_visibilities,
-    }
-
-
-def evaluate_3d_tracking(data_dict, result_dict):
-    train_Ks = data_dict["train_Ks"]
-    train_w2cs = data_dict["train_w2cs"]
-    keypoints_3d = data_dict["keypoints_3d"]
-    time_ids = data_dict["time_ids"]
-    time_pairs = data_dict["time_pairs"]
-    index_pairs = data_dict["index_pairs"]
-    pred_keypoint_Ks = result_dict["pred_keypoint_Ks"]
-    pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"]
-    pred_keypoints_3d = result_dict["pred_keypoints_3d"]
-    if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks):
-        print("Inconsistent camera intrinsics.")
-        print(train_Ks[time_ids][0], pred_keypoint_Ks[0])
-    keypoint_w2cs = train_w2cs[time_ids]
-    q, t, s = solve_procrustes(
-        torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to(
-            torch.float32
-        ),
-        torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32),
-    )[0]
-    R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1))
-    pred_keypoints_3d = np.einsum(
-        "ij,...j->...i",
-        rt_to_mat4(R, t, s).numpy().astype(np.float64),
-        np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1),
-    )
-    pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:]
-    # Compute 3D tracking metrics.
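Before computing metrics, the code aligns predicted camera centers to the ground-truth centers with the similarity transform returned by solve_procrustes (quaternion q, translation t, scale s), so the 3D tracking numbers are invariant to the arbitrary global gauge of the reconstruction. A hedged numpy sketch of the classic Umeyama closed form that such a solver typically implements:

```python
import numpy as np

def umeyama(src: np.ndarray, dst: np.ndarray):
    """Closed-form similarity (s, R, t) minimizing ||dst - (s * R @ src + t)||^2."""
    mu_s, mu_d = src.mean(axis=0), dst.mean(axis=0)
    src_c, dst_c = src - mu_s, dst - mu_d
    cov = dst_c.T @ src_c / len(src)
    U, D, Vt = np.linalg.svd(cov)
    S = np.eye(3)
    if np.linalg.det(U) * np.linalg.det(Vt) < 0:
        S[2, 2] = -1.0  # reflection fix: keep R a proper rotation
    R = U @ S @ Vt
    s = np.trace(np.diag(D) @ S) / (src_c**2).sum() * len(src)
    t = mu_d - s * R @ mu_s
    return s, R, t

# Sanity check on made-up data: recover scale 2 and a known translation.
rng = np.random.default_rng(0)
src = rng.normal(size=(10, 3))
dst = 2.0 * src + np.array([1.0, 0.0, -1.0])
s, R, t = umeyama(src, dst)
print(round(s, 3), np.round(t, 3))  # approx 2.0 and [1, 0, -1]
```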
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
None].repeat(num_pts, axis=-1) - evaluation_points = one_hot_eye == 0 - for i in range(num_frames): - evaluation_points[i, :, ~visibilities[i]] = False - occ_acc = np.sum( - np.equal(pred_visibilities, target_visibilities) & evaluation_points - ) / np.sum(evaluation_points) - all_frac_within = [] - all_jaccard = [] - - for thresh in [4, 8, 16, 32, 64]: - within_dist = np.sum( - np.square(pred_points - target_points), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, target_visibilities) - count_correct = np.sum(is_correct & evaluation_points) - count_visible_points = np.sum(target_visibilities & evaluation_points) - frac_correct = count_correct / count_visible_points - all_frac_within.append(frac_correct) - - true_positives = np.sum(is_correct & pred_visibilities & evaluation_points) - gt_positives = np.sum(target_visibilities & evaluation_points) - false_positives = (~target_visibilities) & pred_visibilities - false_positives = false_positives | ((~within_dist) & pred_visibilities) - false_positives = np.sum(false_positives & evaluation_points) - jaccard = true_positives / (gt_positives + false_positives) - all_jaccard.append(jaccard) - AJ = np.mean(all_jaccard) - APCK = np.mean(all_frac_within) - - print(f"2D tracking AJ: {AJ:.4f}") - print(f"2D tracking avg PCK: {APCK:.4f}") - print(f"2D tracking occlusion accuracy: {occ_acc:.4f}") - print("-----------------------------") - return AJ, APCK, occ_acc - - -def evaluate_nv(data_dict, result_dict): - device = "cuda" - psnr_metric = mPSNR().to(device) - ssim_metric = mSSIM().to(device) - lpips_metric = mLPIPS().to(device) - - val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device) - val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device) - pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device) - - for i in range(len(val_imgs)): - val_img = val_imgs[i] / 255.0 - pred_val_img = pred_val_imgs[i] / 255.0 - val_covisible = val_covisibles[i] / 255.0 - psnr_metric.update(val_img, pred_val_img, val_covisible) - ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - mpsnr = psnr_metric.compute().item() - mssim = ssim_metric.compute().item() - mlpips = lpips_metric.compute().item() - print(f"NV mPSNR: {mpsnr:.4f}") - print(f"NV mSSIM: {mssim:.4f}") - print(f"NV mLPIPS: {mlpips:.4f}") - return mpsnr, mssim, mlpips - - -if __name__ == "__main__": - seq_names = args.seq_names - - epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], [] - AJ_all, APCK_all, occ_acc_all = [], [], [] - mpsnr_all, mssim_all, mlpips_all = [], [], [] - - for seq_name in seq_names: - print("=========================================") - print(f"Evaluating {seq_name}") - print("=========================================") - data_dir = osp.join(args.data_dir, seq_name) - if not osp.exists(data_dir): - data_dir = args.data_dir - if not osp.exists(data_dir): - raise ValueError(f"Data directory {data_dir} not found.") - result_dir = osp.join(args.result_dir, seq_name, "results/") - if not osp.exists(result_dir): - result_dir = osp.join(args.result_dir, "results/") - if not osp.exists(result_dir): - raise ValueError(f"Result directory {result_dir} not found.") - - with open(osp.join(data_dir, "splits/train.json")) as f: - train_names = json.load(f)["frame_names"] - with open(osp.join(data_dir, "splits/val.json")) as f: - val_names = json.load(f)["frame_names"] - - data_dict = load_data_dict(data_dir, train_names, 
val_names) - result_dict = load_result_dict(result_dir, val_names) - if result_dict["pred_keypoints_3d"] is not None: - epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict) - AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict) - epe_all.append(epe) - pck_3d_10cm_all.append(pck_3d_10cm) - pck_3d_5cm_all.append(pck_3d_5cm) - AJ_all.append(AJ) - APCK_all.append(APCK) - occ_acc_all.append(occ_acc) - if len(data_dict["val_imgs"]) > 0: - if result_dict["pred_val_imgs"] is None: - print("No NV results found.") - continue - mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict) - mpsnr_all.append(mpsnr) - mssim_all.append(mssim) - mlpips_all.append(mlpips) - - print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}") - print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}") - print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}") - print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}") - print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}") - print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}") - print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}") - print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}") - print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}") diff --git a/som_out/bear/events.out.tfevents.1729875863.cvsv00-140.1364044.0 b/som_out/bear/events.out.tfevents.1729875863.cvsv00-140.1364044.0 deleted file mode 100644 index 20937c42caaa863fa72b33361463d24d58926bd0..0000000000000000000000000000000000000000 --- a/som_out/bear/events.out.tfevents.1729875863.cvsv00-140.1364044.0 +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:15b67fd649f41ee1d3e9236ddabedfe5728b551bf894130c70da7fd5c02c1dc5 -size 1954 diff --git a/som_out/bear/events.out.tfevents.1729876371.cvsv00-140.1383415.0 b/som_out/bear/events.out.tfevents.1729876371.cvsv00-140.1383415.0 deleted file mode 100644 index ac506cd8d691635c29e184d93ffb7799a4715dc7..0000000000000000000000000000000000000000 --- a/som_out/bear/events.out.tfevents.1729876371.cvsv00-140.1383415.0 +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:afa22c3b7ea6701b3eacc171e620bd42973e7fe217765aa629f2451fb02bad45 -size 168070 diff --git a/som_out/bear/events.out.tfevents.1729876747.cvsv00-140.1397593.0 b/som_out/bear/events.out.tfevents.1729876747.cvsv00-140.1397593.0 deleted file mode 100644 index 9ed6242bba7e4572422f4f080d3df739b3a24f45..0000000000000000000000000000000000000000 --- a/som_out/bear/events.out.tfevents.1729876747.cvsv00-140.1397593.0 +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:350e37158a59503517662a6991b1d61bd982605d249a8167ec5caefae3253496 -size 5316378 diff --git a/som_out/dance-jump/cfg.yaml b/som_out/dance-jump/cfg.yaml deleted file mode 100644 index 2900019d58762b992ad9cebd631372769a6b9075..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/cfg.yaml +++ /dev/null @@ -1,68 +0,0 @@ -batch_size: 8 -data: - camera_type: droid_recon - depth_type: aligned_depth_anything - end: -1 - image_type: JPEGImages - load_from_cache: false - mask_erosion_radius: 3 - mask_type: Annotations - num_targets_per_frame: 4 - res: 480p - root_dir: SOM_data_lcx/Davis_Data - scene_norm_dict: null - seq_name: dance-jump - start: 0 - track_2d_type: bootstapir -loss: - w_depth_const: 0.1 - w_depth_grad: 1 - w_depth_reg: 0.5 - w_mask: 1.0 - w_rgb: 1.0 - w_scale_var: 0.01 - w_smooth_bases: 0.1 - w_smooth_tracks: 2.0 - w_track: 2.0 - w_z_accel: 1.0 -lr: - bg: - colors: 0.01 - 
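-    # editor's note (not in the original file): per-parameter Adam learning
-    # rates for the background Gaussians; these values mirror the BGLRConfig
-    # / FGLRConfig defaults in flow3d/configs.py further down in this diff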
means: 0.00016 - opacities: 0.05 - quats: 0.001 - scales: 0.005 - fg: - colors: 0.01 - means: 0.00016 - motion_coefs: 0.01 - opacities: 0.01 - quats: 0.001 - scales: 0.005 - motion_bases: - rots: 0.00016 - transls: 0.00016 -num_bg: 100000 -num_dl_workers: 4 -num_epochs: 500 -num_fg: 40000 -num_motion_bases: 10 -optim: - control_every: 100 - cull_opacity_threshold: 0.1 - cull_scale_threshold: 0.5 - cull_screen_threshold: 0.15 - densify_scale_threshold: 0.01 - densify_screen_threshold: 0.05 - densify_xys_grad_threshold: 0.0002 - max_steps: 5000 - reset_opacity_every_n_controls: 30 - stop_control_by_screen_steps: 4000 - stop_control_steps: 4000 - stop_densify_steps: 15000 - warmup_steps: 200 -port: null -save_videos_every: 50 -validate_every: 50 -vis_debug: false -work_dir: SOM_output_lcx/Davis_out/dance-jump diff --git a/som_out/dance-jump/checkpoints/last.ckpt b/som_out/dance-jump/checkpoints/last.ckpt deleted file mode 100644 index f2f54cbdc867a40dd79019460129394dfa6a0fdb..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/checkpoints/last.ckpt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:10f6aad721947e41211c2d217e0e47cc8bee0fb25781c7b4c427938fbb5f343f -size 82117096 diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__init__.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/configs.cpython-310.pyc deleted file mode 100644 index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/init_utils.cpython-310.pyc deleted file mode 100644 index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/loss_utils.cpython-310.pyc deleted file mode 100644 index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/metrics.cpython-310.pyc deleted file mode 100644 index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000 Binary files 
a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/params.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/configs.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig 
- motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float = 0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__init__.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git 
a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 index 0e12be124c07bc40149b0b8bcafd1413e94f8fd0..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/colmap.cpython-310.pyc deleted file mode 100644 index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc deleted file mode 100644 index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/base_dataset.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... - - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ... 
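- 
-     # ------------------------------------------------------------------
-     # Editor's note (sketch, not in the original file): a concrete subclass
-     # must override every @abstractmethod above before it can be
-     # instantiated. Assuming in-memory tensors, a hypothetical minimal
-     # implementation would look like:
-     #
-     #   class InMemoryDataset(BaseDataset):
-     #       def __init__(self, imgs, depths, masks, w2cs, Ks):
-     #           self.imgs, self.depths, self.masks = imgs, depths, masks
-     #           self._w2cs, self._Ks = w2cs, Ks
-     #       @property
-     #       def num_frames(self) -> int:
-     #           return len(self.imgs)
-     #       def get_w2cs(self): return self._w2cs
-     #       def get_Ks(self): return self._Ks
-     #       def get_image(self, i): return self.imgs[i]
-     #       def get_depth(self, i): return self.depths[i]
-     #       def get_mask(self, i): return self.masks[i]
-     #       # ...plus the get_tracks_3d / get_bkgd_points samplers.
-     #
-     # CasualDataset and iPhoneDataset below are the two real implementations.
-     # ------------------------------------------------------------------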
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/casual_dataset.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/casual_dataset.py deleted file mode 100644 index e378c6d2408c8ae5d2cdaf45cf6f77dead5c7bd2..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything" #"aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything", #"aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - 
self.depth_type = depth_type - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return 
self.depths[index] / self.scale - - def load_image(self, index) -> torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, 
dtype=torch.float32), - torch.arange(0, H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/colmap.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = 
{v.name: k for k, v in images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
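-     Example (editor's addition): read_next_bytes(fid, 24, "iiQQ") unpacks
-     two int32 and two uint64 fields (4 + 4 + 8 + 8 = 24 bytes), which is
-     the layout of a camera record in read_cameras_binary below.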
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/iphone_dataset.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - 
self.scene_norm_dict = scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - guru.info(f"{depth_type=}") - - # Test whether the current data has a validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths.
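-                 # editor's note: the monocular-depth .npy files are treated
-                 # as disparity (inverse depth); load_depth clamps tiny values
-                 # before inverting so near-zero disparity maps to a finite
-                 # far plane, e.g. 0.0 -> 1e-3 -> depth 1.0 / 1e-3 = 1000.0.
-                 # Only the "lidar" branch reads metric depth directly.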
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
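-                 # editor's note: the normalization below (i) centers the
-                 # tracks at their mean, (ii) takes the scale as half the
-                 # largest 5%-95% quantile extent over xyz, and (iii) rotates
-                 # the mean camera up-vector (-row 1 of w2c) onto world +z via
-                 # the axis-angle vector normalize(up x z) * arccos(up . z);
-                 # compute_scene_norm in casual_dataset.py uses the same recipe.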
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
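-         # editor's note: each precomputed track file holds one
-         # (query frame, target frame) pair as an (N, 4) array per point:
-         # x, y, an occlusion score, and a distance/uncertainty score (named
-         # occs and dists below), which parse_tapir_track_info turns into
-         # visible / invisible / confidence masks further down.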
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
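-            # Bilinearly sample the query frame's RGB at the tracks'
-            # query-frame positions; normalize_coords maps pixel coordinates
-            # into the [-1, 1] range that F.grid_sample expects.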
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
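-            # A zero-length TensorDataset stands in so val_dataloader() can
-            # still return a loader when the scene has no validation split.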
- self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/utils.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. 
-
-    Args:
-        occlusions: [-inf, inf], np.float32
-        expected_dist: [-inf, inf], np.float32
-
-    Returns:
-        visibles: bool
-    """
-
-    def sigmoid(x):
-        if isinstance(x, np.ndarray):
-            return 1 / (1 + np.exp(-x))
-        else:
-            return torch.sigmoid(x)
-
-    visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5
-    return visibles
-
-
-def parse_tapir_track_info(occlusions, expected_dist):
-    """
-    return:
-        valid_visible: mask of visible & confident points
-        valid_invisible: mask of invisible & confident points
-        confidence: clamped confidence scores (all < 0.5 -> 0)
-    """
-    visibility = 1 - torch.sigmoid(occlusions)
-    confidence = 1 - torch.sigmoid(expected_dist)
-    valid_visible = visibility * confidence > 0.5
-    valid_invisible = (1 - visibility) * confidence > 0.5
-    # set all confidence < 0.5 to 0
-    confidence = confidence * (valid_visible | valid_invisible).float()
-    return valid_visible, valid_invisible, confidence
-
-
-def get_tracks_3d_for_query_frame(
-    query_index: int,
-    query_img: torch.Tensor,
-    tracks_2d: torch.Tensor,
-    depths: torch.Tensor,
-    masks: torch.Tensor,
-    inv_Ks: torch.Tensor,
-    c2ws: torch.Tensor,
-):
-    """
-    :param query_index (int)
-    :param query_img [H, W, 3]
-    :param tracks_2d [N, T, 4]
-    :param depths [T, H, W]
-    :param masks [T, H, W]
-    :param inv_Ks [T, 3, 3]
-    :param c2ws [T, 4, 4]
-    returns (
-        tracks_3d [N, T, 3]
-        track_colors [N, 3]
-        visibles [N, T]
-        invisibles [N, T]
-        confidences [N, T]
-    )
-    """
-    T, H, W = depths.shape
-    query_img = query_img[None].permute(0, 3, 1, 2)  # (1, 3, H, W)
-    tracks_2d = tracks_2d.swapaxes(0, 1)  # (T, N, 4)
-    tracks_2d, occs, dists = (
-        tracks_2d[..., :2],
-        tracks_2d[..., 2],
-        tracks_2d[..., 3],
-    )
-    # visibles = postprocess_occlusions(occs, dists)
-    # (T, N), (T, N), (T, N)
-    visibles, invisibles, confidences = parse_tapir_track_info(occs, dists)
-    # Unproject 2D tracks to 3D.
-    # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N)
-    track_depths = F.grid_sample(
-        depths[:, None],
-        normalize_coords(tracks_2d[:, None], H, W),
-        align_corners=True,
-        padding_mode="border",
-    )[:, 0, 0]
-    tracks_3d = (
-        torch.einsum(
-            "nij,npj->npi",
-            inv_Ks,
-            F.pad(tracks_2d, (0, 1), value=1.0),
-        )
-        * track_depths[..., None]
-    )
-    tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[
-        ..., :3
-    ]
-    # Filter out out-of-mask tracks.
-    # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N)
-    is_in_masks = (
-        F.grid_sample(
-            masks[:, None],
-            normalize_coords(tracks_2d[:, None], H, W),
-            align_corners=True,
-        )[:, 0, 0]
-        == 1
-    )
-    visibles *= is_in_masks
-    invisibles *= is_in_masks
-    confidences *= is_in_masks.float()
-
-    # valid if in the fg mask at least 40% of the time
-    # in_mask_counts = is_in_masks.sum(0)
-    # t = 0.25
-    # thresh = min(t * T, in_mask_counts.float().quantile(t).item())
-    # valid = in_mask_counts > thresh
-    valid = is_in_masks[query_index]
-    # valid if visible 5% of the time
-    visible_counts = visibles.sum(0)
-    valid = valid & (
-        visible_counts
-        >= min(
-            int(0.05 * T),
-            visible_counts.float().quantile(0.1).item(),
-        )
-    )
-
-    # Get track's color from the query frame.
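-    # padding_mode="border" clamps samples that fall outside the image, so
-    # tracks on the frame boundary still receive a defined color.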
- # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. 
- If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. -def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] 
- ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/init_utils.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes -from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - -def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor -) -> GaussianParams: - """ - using dataclasses individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. 
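-    # Opacities are kept as logits (the model applies a sigmoid activation),
-    # so logit(0.7) starts every gaussian at 70% opacity.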
- opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - -def init_bg( - points: StaticObservations, -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkdg_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkdg_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals. - local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, 
init_ts = [], []
-
-    if rot_type == "quat":
-        id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device)
-        rot_dim = 4
-    else:
-        id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device)
-        rot_dim = 6
-
-    init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1)
-    init_ts = torch.zeros(num_bases, num_frames, 3, device=device)
-    errs_before = np.full((num_bases, num_frames), -1.0)
-    errs_after = np.full((num_bases, num_frames), -1.0)
-
-    tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames))
-    print(f"{tgt_ts=}")
-    skipped_ts = {}
-    for n, cluster_id in enumerate(ids):
-        mask_in_cluster = labels == cluster_id
-        cluster = tracks_3d.xyz[mask_in_cluster].transpose(
-            0, 1
-        )  # [num_frames, n_pts, 3]
-        visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes(
-            0, 1
-        )  # [num_frames, n_pts]
-        confidences = tracks_3d.confidences[mask_in_cluster].swapaxes(
-            0, 1
-        )  # [num_frames, n_pts]
-        weights = get_weights_for_procrustes(cluster, visibilities)
-        prev_t = cano_t
-        cluster_skip_ts = []
-        for cur_t in tgt_ts:
-            # compute pairwise transform from cano_t
-            procrustes_weights = (
-                weights[cano_t]
-                * weights[cur_t]
-                * (confidences[cano_t] + confidences[cur_t])
-                / 2
-            )
-            if procrustes_weights.sum() < min_mean_weight * num_frames:
-                init_rots[n, cur_t] = init_rots[n, prev_t]
-                init_ts[n, cur_t] = init_ts[n, prev_t]
-                cluster_skip_ts.append(cur_t)
-            else:
-                se3, (err, err_before) = solve_procrustes(
-                    cluster[cano_t],
-                    cluster[cur_t],
-                    weights=procrustes_weights,
-                    enforce_se3=True,
-                    rot_type=rot_type,
-                )
-                init_rot, init_t, _ = se3
-                assert init_rot.shape[-1] == rot_dim
-                # double cover
-                if rot_type == "quat" and torch.linalg.norm(
-                    init_rot - init_rots[n][prev_t]
-                ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]):
-                    init_rot = -init_rot
-                init_rots[n, cur_t] = init_rot
-                init_ts[n, cur_t] = init_t
-                if np.isnan(err):
-                    print(f"{cur_t=} {err=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                if np.isnan(err_before):
-                    print(f"{cur_t=} {err_before=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                errs_after[n, cur_t] = err
-                errs_before[n, cur_t] = err_before
-                prev_t = cur_t
-        skipped_ts[cluster_id.item()] = cluster_skip_ts
-
-    guru.info(f"{skipped_ts=}")
-    guru.info(
-        "procrustes init median error: {:.5f} => {:.5f}".format(
-            np.median(errs_before[errs_before > 0]),
-            np.median(errs_after[errs_after > 0]),
-        )
-    )
-    guru.info(
-        "procrustes init mean error: {:.5f} => {:.5f}".format(
-            np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0])
-        )
-    )
-    guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}")
-
-    if vis:
-        server = get_server(port)
-        center_idcs = torch.argmin(dists2centers, dim=0)
-        print(f"{dists2centers.shape=} {center_idcs.shape=}")
-        vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs])
-        vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks")
-        import ipdb
-
-        ipdb.set_trace()
-
-    bases = MotionBases(init_rots, init_ts)
-    return bases, motion_coefs, tracks_3d
-
-
-def run_initial_optim(
-    fg: GaussianParams,
-    bases: MotionBases,
-    tracks_3d: TrackObservations,
-    Ks: torch.Tensor,
-    w2cs: torch.Tensor,
-    num_iters: int = 1000,
-    use_depth_range_loss: bool = False,
-):
-    """
-    :param motion_rots: [num_bases, num_frames, 4|6]
-    :param motion_transls: [num_bases, num_frames, 3]
-    :param motion_coefs: [num_gaussians, num_bases]
-    :param means: [num_gaussians, 3]
-    """
-    optimizer = torch.optim.Adam(
-        [
-            {"params":
bases.params["rots"], "lr": 1e-2}, - {"params": bases.params["transls"], "lr": 3e-2}, - {"params": fg.params["motion_coefs"], "lr": 1e-2}, - {"params": fg.params["means"], "lr": 1e-3}, - ], - ) - scheduler = torch.optim.lr_scheduler.ExponentialLR( - optimizer, gamma=0.1 ** (1 / num_iters) - ) - G = fg.params.means.shape[0] - num_frames = bases.num_frames - device = bases.params["rots"].device - - w_smooth_func = lambda i, min_v, max_v, th: ( - min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v - ) - - gt_2d, gt_depth = project_2d_tracks( - tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - # (G, T, 2) - gt_2d = gt_2d.swapaxes(0, 1) - # (G, T) - gt_depth = gt_depth.swapaxes(0, 1) - - ts = torch.arange(0, num_frames, device=device) - ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1)) # i (3B,) - - pbar = tqdm(range(0, num_iters)) - for i in pbar: - coefs = fg.get_coefs() - transfms = bases.compute_transforms(ts, coefs) - positions = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. 
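-        # The penalties below are second-order (acceleration) terms of the
-        # form ||2 x_t - x_{t-1} - x_{t+1}||; their weight stays at 0.01 until
-        # iteration 400, then ramps linearly toward 0.1 (see w_smooth_func).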
- w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: 
ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / (cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, 
first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/loss_utils.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss 
< loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - # acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/metrics.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/metrics.py +++ /dev/null @@ -1,313 +0,0 @@ 
-from typing import Literal
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from torchmetrics.functional.image.lpips import _NoTrainLpips
-from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure
-from torchmetrics.metric import Metric
-from torchmetrics.utilities import dim_zero_cat
-from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE
-
-
-def compute_psnr(
-    preds: torch.Tensor,
-    targets: torch.Tensor,
-    masks: torch.Tensor | None = None,
-) -> float:
-    """
-    Args:
-        preds (torch.Tensor): (..., 3) predicted images in [0, 1].
-        targets (torch.Tensor): (..., 3) target images in [0, 1].
-        masks (torch.Tensor | None): (...,) optional binary masks where the
-            1-regions will be taken into account.
-
-    Returns:
-        psnr (float): Peak signal-to-noise ratio.
-    """
-    if masks is None:
-        masks = torch.ones_like(preds[..., 0])
-    return (
-        -10.0
-        * torch.log(
-            F.mse_loss(
-                preds * masks[..., None],
-                targets * masks[..., None],
-                reduction="sum",
-            )
-            / masks.sum().clamp(min=1.0)
-            / 3.0
-        )
-        / np.log(10.0)
-    ).item()
-
-
-def compute_pose_errors(
-    preds: torch.Tensor, targets: torch.Tensor
-) -> tuple[float, float, float]:
-    """
-    Args:
-        preds: (N, 4, 4) predicted camera poses.
-        targets: (N, 4, 4) target camera poses.
-
-    Returns:
-        ate (float): Absolute trajectory error.
-        rpe_t (float): Relative pose error in translation.
-        rpe_r (float): Relative pose error in rotation (degree).
-    """
-    # Compute ATE.
-    ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item()
-    # Compute RPE_t and RPE_r.
-    # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r.
-    # torch has numerical issues for acos when the value is close to 1.0, i.e.
-    # RPE_r is supposed to be very small, and will result in artificially large
-    # error.
-    preds = preds.detach().cpu().numpy()
-    targets = targets.detach().cpu().numpy()
-    pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:]
-    target_rels = np.linalg.inv(targets[:-1]) @ targets[1:]
-    error_rels = np.linalg.inv(target_rels) @ pred_rels
-    traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1)
-    rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item()
-    rpe_r = (
-        np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item()
-        / np.pi
-        * 180.0
-    )
-    return ate, rpe_t, rpe_r
-
-
-class mPSNR(PeakSignalNoiseRatio):
-    sum_squared_error: list[torch.Tensor]
-    total: list[torch.Tensor]
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            data_range=1.0,
-            base=10.0,
-            dim=None,
-            reduction="elementwise_mean",
-            **kwargs,
-        )
-        self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat")
-        self.add_state("total", default=[], dist_reduce_fx="cat")
-
-    def __len__(self) -> int:
-        return len(self.total)
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (..., 3) float32 predicted images.
-            targets (torch.Tensor): (..., 3) float32 target images.
-            masks (torch.Tensor | None): (...,) optional binary masks where the
-                1-regions will be taken into account.
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - self.sum_squared_error.append( - torch.sum(torch.pow((preds - targets) * masks[..., None], 2)) - ) - self.total.append(masks.sum().to(torch.int64) * 3) - - def compute(self) -> torch.Tensor: - """Compute peak signal-to-noise ratio over state.""" - sum_squared_error = dim_zero_cat(self.sum_squared_error) - total = dim_zero_cat(self.total) - return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0) - - -class mSSIM(StructuralSimilarityIndexMeasure): - similarity: list - - def __init__(self, **kwargs) -> None: - super().__init__( - reduction=None, - data_range=1.0, - return_full_image=False, - **kwargs, - ) - assert isinstance(self.sigma, float) - - def __len__(self) -> int: - return sum([s.shape[0] for s in self.similarity]) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional binary masks where - the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - - # Construct a 1D Gaussian blur filter. - assert isinstance(self.kernel_size, int) - hw = self.kernel_size // 2 - shift = (2 * hw - self.kernel_size + 1) / 2 - assert isinstance(self.sigma, float) - f_i = ( - (torch.arange(self.kernel_size, device=preds.device) - hw + shift) - / self.sigma - ) ** 2 - filt = torch.exp(-0.5 * f_i) - filt /= torch.sum(filt) - - # Blur in x and y (faster than the 2D convolution). - def convolve2d(z, m, f): - # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf). - z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. 
- # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. 
- """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/params.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - 
self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (G, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] == 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/renderer.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging.
- work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/scene_model.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def __init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return 
self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns opacities: (G,) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,).
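- # When target_ts is given, per-gaussian 3D positions at those timesteps are - # appended as extra color channels, rasterized, and split back out as "tracks_3d".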
- target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. 
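- # retain_grad() is required because means2d is a non-leaf tensor; without it, - # the screen-space gradients read by adaptive density control would be discarded.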
- self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/tensor_dataclass.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/trainer.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
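- # A single batched pose query covers every target timestep of every batch element.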
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
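- # All values below are scalars; log_dict() writes them to TensorBoard.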
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
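- # Returns False (skipping control) if the last forward pass did not cache - # screen-space stats, i.e. the model was not rendered in training mode.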
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/trajectories.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1) - y = torch.cross(z, x, dim=-1) # (3) - avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center) - avg_w2c = torch.linalg.inv(avg_c2w) - return avg_w2c - - -def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor: - """Triangulate a set of rays to find a single lookat point. - - Args: - origins (torch.Tensor): A (N, 3) array of ray origins. - viewdirs (torch.Tensor): A (N, 3) array of ray view directions. - - Returns: - torch.Tensor: A (3,) lookat point. - """ - - viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1) - eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None] - # Calculate projection matrix I - rr^T - I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :]) - # Compute sum of projections - sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3) - # Solve for the intersection point using least squares - lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0] - # Check NaNs. - assert not torch.any(torch.isnan(lookat)) - return lookat - - -def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor): - """ - Args: - positions: (N, 3) tensor of camera positions - lookat: (3,) tensor of lookat point - up: (3,) tensor of up vector - - Returns: - w2cs: (N, 3, 3) tensor of world to camera rotation matrices - """ - forward_vectors = F.normalize(lookat - positions, dim=-1) - right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1) - down_vectors = F.normalize( - torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1 - ) - Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1) - w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions)) - return w2cs - - -def get_arc_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_position = torch.linalg.inv(ref_w2c)[:3, 3] - thetas = ( - torch.sin( - torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[ - :-1 - ] - ) - * (degree / 2.0) - / 180.0 - * torch.pi - ) - positions = torch.einsum( - "nij,j->ni", - roma.rotvec_to_rotmat(thetas[:, None] * up[None]), - ref_position - lookat, - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_lemniscate_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi) - # Lemniscate curve in camera space. Starting at the origin. - thetas = ( - torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1] - + torch.pi / 2 - ) - positions = torch.stack( - [ - a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2), - a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2), - torch.zeros(num_frames, device=ref_w2c.device), - ], - dim=-1, - ) - # Transform to world space. 
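- # Pad with a homogeneous 1 so the 3x4 camera-to-world matrix applies rotation - # and translation in one einsum.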
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/transforms.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transfm to be SE3. - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - q (torch.Tensor): (4,), rotation component in quaternion of WXYZ - format. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (torch.Tensor): (), average L2 distance after alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error. 
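- # Sanity check: apply the recovered similarity transform to src and compare the - # weighted residual error against the pre-alignment error. Hypothetical usage: - # sim3, (err, err_before) = solve_procrustes(src, dst) - # q, t, s = sim3 # WXYZ quaternion, translation, scale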
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/validator.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
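# [editor's sketch, not part of the original diff] The metric updates above
# are masked: only pixels where the validity/covisibility mask is on
# contribute. flow3d.metrics is not included in this diff, so this is a
# minimal masked-PSNR stand-in rather than the actual mPSNR implementation.
import torch

def masked_psnr(pred: torch.Tensor, gt: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # pred, gt: (H, W, 3) in [0, 1]; mask: (H, W), nonzero = valid pixel.
    mse = ((pred - gt)[mask.bool()] ** 2).mean()
    return -10.0 * torch.log10(mse)  # PSNR with peak value 1.0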
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
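# [editor's sketch, not part of the original diff] The PCK update above
# counts a keypoint as correct when its 2-D error is within 5% of the longer
# image side (max(img_wh) * 0.05). flow3d.metrics.PCK is not in this diff;
# a minimal stand-in of the same idea:
import torch

def pck(pred: torch.Tensor, gt: torch.Tensor, threshold: float) -> torch.Tensor:
    # pred, gt: (N, 2) pixel coordinates; threshold in pixels.
    dist = torch.linalg.norm(pred - gt, dim=-1)
    return (dist < threshold).float().mean()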
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
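# [editor's sketch, not part of the original diff] The fork_rng/pca_lowrank
# block above turns each Gaussian's motion-coefficient vector into an RGB
# color: project to 3 PCA components, then min-max normalize per channel.
# The same idea standalone, with hypothetical shapes:
import torch

coefs = torch.rand(1000, 10)                      # (num_gaussians, num_bases)
proj = torch.pca_lowrank(coefs[None], q=3)[0][0]  # U from batched PCA: (1000, 3)
colors = (proj - proj.min(0).values) / (proj.max(0).values - proj.min(0).values)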
- t = batch["ts"][0]
- # (4, 4).
- w2c = batch["w2cs"][0]
- # (3, 3).
- K = batch["Ks"][0]
- # (H, W, 3).
- img = batch["imgs"][0]
- img_wh = img.shape[-2::-1]
- rendered = self.model.render(
- t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors
- )
- # Putting results onto CPU since it will consume unnecessarily
- # large GPU memory for long sequences otherwise.
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu())
- video = torch.stack(video, dim=0)
- iio.mimwrite(
- osp.join(video_dir, "motion_coefs.mp4"),
- make_video_divisble((video.numpy() * 255).astype(np.uint8)),
- fps=fps,
- )
diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__init__.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc deleted file mode 100644 index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ
diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__pycache__/render_panel.cpython-310.pyc deleted file mode 100644 index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ
diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__pycache__/viewer.cpython-310.pyc deleted file mode 100644 index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ
diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/playback_panel.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/playback_panel.py deleted file mode 100644 index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/playback_panel.py +++ /dev/null @@ -1,68 +0,0 @@
-import threading
-import time
-
-import viser
-
-
-def add_gui_playback_group(
- server: viser.ViserServer,
-
num_frames: int, - min_fps: float = 1.0, - max_fps: float = 60.0, - fps_step: float = 0.1, - initial_fps: float = 10.0, -): - gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/render_panel.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
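# [editor's sketch, not part of the original diff] Wiring the playback group
# from playback_panel.py above into a minimal viewer loop. It assumes only
# that viser is installed; the render step is a hypothetical placeholder.
import time
import viser

server = viser.ViserServer()
gui_timestep = add_gui_playback_group(server, num_frames=100, initial_fps=15.0)[0]

@gui_timestep.on_update
def _(_) -> None:
    # Re-render the scene for the new timestep here.
    print(f"render frame {gui_timestep.value}")

while True:
    time.sleep(1.0)  # keep the server alive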
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
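# [editor's sketch, not part of the original diff] Keyframe.from_camera above
# snapshots a connected client's camera; a keyframe can also be built by hand,
# e.g. for testing CameraPath without a browser client. Values are
# hypothetical; wxyz is a scalar-first unit quaternion (identity here).
import numpy as np

kf = Keyframe(
    time=0.0,
    position=np.zeros(3),
    wxyz=np.array([1.0, 0.0, 0.0, 0.0]),
    override_fov_enabled=False,
    override_fov_rad=np.pi / 3,
    aspect=16 / 9,
    override_transition_enabled=False,
    override_transition_sec=None,
)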
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
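# [editor's sketch, not part of the original diff] The "Go to" handler above
# eases the camera along the SE(3) geodesic between two poses via log/exp.
# The same idea in isolation with viser.transforms, using two hypothetical
# poses:
import numpy as np
import viser.transforms as tf

T_a = tf.SE3.from_rotation_and_translation(tf.SO3.identity(), np.zeros(3))
T_b = tf.SE3.from_rotation_and_translation(
    tf.SO3.from_z_radians(np.pi / 4), np.array([1.0, 0.0, 0.0])
)
T_ab = T_a.inverse() @ T_b
# Ten evenly spaced poses from T_a to T_b along the geodesic.
poses = [T_a @ tf.SE3.exp(T_ab.log() * j / 9.0) for j in range(10)]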
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
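# [editor's sketch, not part of the original diff] update_spline and
# interpolate_pose_and_fov_rad above combine two interpolators: a
# Kochanek-Bartels spline through the keyframe values (its parameter grid
# defaults to integer keyframe indices) and a monotone PCHIP map from seconds
# to that parameter, as in spline_t_from_t_sec. Both in isolation, with
# hypothetical keyframe data:
import numpy as np
import scipy.interpolate
import splines

positions = [np.array([0.0, 0.0, 0.0]), np.array([1.0, 0.0, 0.0]),
             np.array([1.0, 1.0, 0.0]), np.array([0.0, 1.0, 1.0])]
pos_spline = splines.KochanekBartels(
    positions, tcb=(0.5, 0.0, 0.0), endconditions="natural"
)
t_cumsum = np.array([0.0, 2.0, 4.5, 9.0])  # cumulative transition times (sec)
sec_to_t = scipy.interpolate.PchipInterpolator(
    x=t_cumsum, y=np.arange(len(t_cumsum))  # monotone: seconds -> spline t
)
pos_at_3s = pos_spline.evaluate(np.clip(sec_to_t(3.0), 0, len(positions) - 1))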
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
- Returns current camera pose + FOV if available.""" - - if preview_frame_slider is None: - return - maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad( - preview_frame_slider.value / get_max_frame_index() - ) - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - render_tab_state.preview_fov = fov_rad - render_tab_state.preview_aspect = camera_path.get_aspect() - render_tab_state.preview_camera_type = camera_type.value - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - return pose, fov_rad, time - - def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]: - """Helper for creating the current frame # slider. This is removed and - re-added anytime the `max` value changes.""" - - with playback_folder: - preview_frame_slider = server.gui.add_slider( - "Preview frame", - min=0, - max=get_max_frame_index(), - step=1, - initial_value=0, - # Place right after the pause button. - order=preview_render_stop_button.order + 0.01, - disabled=get_max_frame_index() == 1, - ) - play_button.disabled = preview_frame_slider.disabled - preview_render_button.disabled = preview_frame_slider.disabled - - @preview_frame_slider.on_update - def _(_) -> None: - nonlocal preview_camera_handle - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - - preview_camera_handle = server.scene.add_camera_frustum( - "/preview_camera", - fov=fov_rad, - aspect=resolution.value[0] / resolution.value[1], - scale=0.35, - wxyz=pose.rotation().wxyz, - position=pose.translation(), - color=(10, 200, 30), - ) - if render_tab_state.preview_render: - for client in server.get_clients().values(): - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - return preview_frame_slider - - # We back up the camera poses before and after we start previewing renders. - camera_pose_backup_from_id: Dict[int, tuple] = {} - - @preview_render_button.on_click - def _(_) -> None: - render_tab_state.preview_render = True - preview_render_button.visible = False - preview_render_stop_button.visible = True - - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov, time = maybe_pose_and_fov_rad_and_time - del fov - - # Hide all scene nodes when we're previewing the render. - server.scene.set_global_visibility(True) - - # Back up and then set camera poses. - for client in server.get_clients().values(): - camera_pose_backup_from_id[client.client_id] = ( - client.camera.position, - client.camera.look_at, - client.camera.up_direction, - ) - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - @preview_render_stop_button.on_click - def _(_) -> None: - render_tab_state.preview_render = False - preview_render_button.visible = True - preview_render_stop_button.visible = False - - # Revert camera poses. 
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera() # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play).start() - - # Play the camera trajectory when the play button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # add button for loading existing path - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - camera_path_dir = datapath.parent - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # load the json file - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # apply the x rotation by 180 deg - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_Fov values will not be exactly matched. 
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/utils.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, 
occ, radius=radius) - out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
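- Depth is first normalized to the [near_plane, far_plane] range and clipped to [0, 1] before the turbo colormap is applied; where acc < 1, the color is blended toward white.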
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/viewer.py b/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/dance-jump/code/2024-10-26-011416/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/dance-jump/code/2024-10-26-011416/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011416/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/dance-jump/code/2024-10-26-011416/scripts/evaluate_iphone.py b/som_out/dance-jump/code/2024-10-26-011416/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- 
a/som_out/dance-jump/code/2024-10-26-011416/scripts/evaluate_iphone.py +++ /dev/null @@ -1,447 +0,0 @@ -import argparse -import json -import os.path as osp -from glob import glob -from itertools import product - -import cv2 -import imageio.v3 as iio -import numpy as np -import roma -import torch -from tqdm import tqdm - -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.metrics import mLPIPS, mPSNR, mSSIM -from flow3d.transforms import rt_to_mat4, solve_procrustes - -parser = argparse.ArgumentParser() -parser.add_argument( - "--data_dir", - type=str, - help="Path to the data directory that contains all the sequences.", -) -parser.add_argument( - "--result_dir", - type=str, - help="Path to the result directory that contains the results. " - "For batch evaluation, result_dir should contain subdirectories for each sequence (result_dir/seq_name/results). " - "For single-sequence evaluation, result_dir should contain results directly (result_dir/results).", -) -parser.add_argument( - "--seq_names", - type=str, - nargs="+", - default=[ - "apple", - "backpack", - "block", - "creeper", - "handwavy", - "haru-sit", - "mochi-high-five", - "paper-windmill", - "pillow", - "spin", - "sriracha-tree", - "teddy", - ], - help="Sequence names to evaluate.", -) -args = parser.parse_args() - - -def load_data_dict(data_dir, train_names, val_names): - val_imgs = np.array( - [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names] - ) - val_covisibles = np.array( - [ - iio.imread( - osp.join( - data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png" - ) - ) - for name in tqdm(val_names, desc="Loading val covisibles") - ] - ) - train_depths = np.array( - [ - np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0] - for name in train_names - ] - ) - train_Ks, train_w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [name + ".png" for name in train_names], - ) - train_Ks = train_Ks[:, :3, :3] - scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item() - train_c2ws = np.linalg.inv(train_w2cs) - train_c2ws[:, :3, -1] *= scale - train_w2cs = np.linalg.inv(train_c2ws) - keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json"))) - keypoints_2d = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints_2d.append(json.load(f)) - keypoints_2d = np.array(keypoints_2d) - keypoints_2d[..., :2] *= 2.0 - time_ids = np.array( - [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths] - ) - time_pairs = np.array(list(product(time_ids, repeat=2))) - index_pairs = np.array(list(product(range(len(time_ids)), repeat=2))) - keypoints_3d = [] - for i, kps_2d in zip(time_ids, keypoints_2d): - K = train_Ks[i] - w2c = train_w2cs[i] - depth = train_depths[i] - is_kp_visible = kps_2d[:, 2] == 1 - is_depth_valid = ( - cv2.remap( - (depth != 0).astype(np.float32), - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - == 1 - ) - kp_depths = cv2.remap( - depth, # type: ignore - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - kps_3d = ( - np.einsum( - "ij,pj->pi", - np.linalg.inv(K), - np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1), - ) - * kp_depths[:, None] - ) - kps_3d = np.einsum( - "ij,pj->pi", - np.linalg.inv(w2c)[:3], - np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1), - ) - kps_3d =
np.concatenate( - [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1 - ) - kps_3d[kps_3d[:, -1] != 1] = 0.0 - keypoints_3d.append(kps_3d) - keypoints_3d = np.array(keypoints_3d) - return { - "val_imgs": val_imgs, - "val_covisibles": val_covisibles, - "train_depths": train_depths, - "train_Ks": train_Ks, - "train_w2cs": train_w2cs, - "keypoints_2d": keypoints_2d, - "keypoints_3d": keypoints_3d, - "time_ids": time_ids, - "time_pairs": time_pairs, - "index_pairs": index_pairs, - } - - -def load_result_dict(result_dir, val_names): - try: - pred_val_imgs = np.array( - [ - iio.imread(osp.join(result_dir, "rgb", f"{name}.png")) - for name in val_names - ] - ) - except Exception: - pred_val_imgs = None - try: - keypoints_dict = np.load( - osp.join(result_dir, "keypoints.npz"), allow_pickle=True - ) - if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict: - keypoints_dict = keypoints_dict["arr_0"].item() - pred_keypoint_Ks = keypoints_dict["Ks"] - pred_keypoint_w2cs = keypoints_dict["w2cs"] - pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"] - pred_train_depths = keypoints_dict["pred_train_depths"] - except Exception: - print( - "No keypoints.npz found. Make sure this is because the method itself cannot produce keypoints." - ) - keypoints_dict = {} - pred_keypoint_Ks = None - pred_keypoint_w2cs = None - pred_keypoints_3d = None - pred_train_depths = None - - if "visibilities" in list(keypoints_dict.keys()): - pred_visibilities = keypoints_dict["visibilities"] - else: - pred_visibilities = None - - return { - "pred_val_imgs": pred_val_imgs, - "pred_train_depths": pred_train_depths, - "pred_keypoint_Ks": pred_keypoint_Ks, - "pred_keypoint_w2cs": pred_keypoint_w2cs, - "pred_keypoints_3d": pred_keypoints_3d, - "pred_visibilities": pred_visibilities, - } - - -def evaluate_3d_tracking(data_dict, result_dict): - train_Ks = data_dict["train_Ks"] - train_w2cs = data_dict["train_w2cs"] - keypoints_3d = data_dict["keypoints_3d"] - time_ids = data_dict["time_ids"] - time_pairs = data_dict["time_pairs"] - index_pairs = data_dict["index_pairs"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"] - if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks): - print("Inconsistent camera intrinsics.") - print(train_Ks[time_ids][0], pred_keypoint_Ks[0]) - keypoint_w2cs = train_w2cs[time_ids] - q, t, s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0] - R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1)) - pred_keypoints_3d = np.einsum( - "ij,...j->...i", - rt_to_mat4(R, t, s).numpy().astype(np.float64), - np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1), - ) - pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:] - # Compute 3D tracking metrics.
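- # A keypoint pair counts only if its validity flag (visible with valid depth) is 1 in both frames. EPE is the mean L2 error over these covisible pairs; PCK reports the fraction of errors below 0.1 / 0.05 world units, reported as 10 cm / 5 cm after metric scaling.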
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
None].repeat(num_pts, axis=-1) - evaluation_points = one_hot_eye == 0 - for i in range(num_frames): - evaluation_points[i, :, ~visibilities[i]] = False - occ_acc = np.sum( - np.equal(pred_visibilities, target_visibilities) & evaluation_points - ) / np.sum(evaluation_points) - all_frac_within = [] - all_jaccard = [] - - for thresh in [4, 8, 16, 32, 64]: - within_dist = np.sum( - np.square(pred_points - target_points), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, target_visibilities) - count_correct = np.sum(is_correct & evaluation_points) - count_visible_points = np.sum(target_visibilities & evaluation_points) - frac_correct = count_correct / count_visible_points - all_frac_within.append(frac_correct) - - true_positives = np.sum(is_correct & pred_visibilities & evaluation_points) - gt_positives = np.sum(target_visibilities & evaluation_points) - false_positives = (~target_visibilities) & pred_visibilities - false_positives = false_positives | ((~within_dist) & pred_visibilities) - false_positives = np.sum(false_positives & evaluation_points) - jaccard = true_positives / (gt_positives + false_positives) - all_jaccard.append(jaccard) - AJ = np.mean(all_jaccard) - APCK = np.mean(all_frac_within) - - print(f"2D tracking AJ: {AJ:.4f}") - print(f"2D tracking avg PCK: {APCK:.4f}") - print(f"2D tracking occlusion accuracy: {occ_acc:.4f}") - print("-----------------------------") - return AJ, APCK, occ_acc - - -def evaluate_nv(data_dict, result_dict): - device = "cuda" - psnr_metric = mPSNR().to(device) - ssim_metric = mSSIM().to(device) - lpips_metric = mLPIPS().to(device) - - val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device) - val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device) - pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device) - - for i in range(len(val_imgs)): - val_img = val_imgs[i] / 255.0 - pred_val_img = pred_val_imgs[i] / 255.0 - val_covisible = val_covisibles[i] / 255.0 - psnr_metric.update(val_img, pred_val_img, val_covisible) - ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - mpsnr = psnr_metric.compute().item() - mssim = ssim_metric.compute().item() - mlpips = lpips_metric.compute().item() - print(f"NV mPSNR: {mpsnr:.4f}") - print(f"NV mSSIM: {mssim:.4f}") - print(f"NV mLPIPS: {mlpips:.4f}") - return mpsnr, mssim, mlpips - - -if __name__ == "__main__": - seq_names = args.seq_names - - epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], [] - AJ_all, APCK_all, occ_acc_all = [], [], [] - mpsnr_all, mssim_all, mlpips_all = [], [], [] - - for seq_name in seq_names: - print("=========================================") - print(f"Evaluating {seq_name}") - print("=========================================") - data_dir = osp.join(args.data_dir, seq_name) - if not osp.exists(data_dir): - data_dir = args.data_dir - if not osp.exists(data_dir): - raise ValueError(f"Data directory {data_dir} not found.") - result_dir = osp.join(args.result_dir, seq_name, "results/") - if not osp.exists(result_dir): - result_dir = osp.join(args.result_dir, "results/") - if not osp.exists(result_dir): - raise ValueError(f"Result directory {result_dir} not found.") - - with open(osp.join(data_dir, "splits/train.json")) as f: - train_names = json.load(f)["frame_names"] - with open(osp.join(data_dir, "splits/val.json")) as f: - val_names = json.load(f)["frame_names"] - - data_dict = load_data_dict(data_dir, train_names, 
val_names) - result_dict = load_result_dict(result_dir, val_names) - if result_dict["pred_keypoints_3d"] is not None: - epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict) - AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict) - epe_all.append(epe) - pck_3d_10cm_all.append(pck_3d_10cm) - pck_3d_5cm_all.append(pck_3d_5cm) - AJ_all.append(AJ) - APCK_all.append(APCK) - occ_acc_all.append(occ_acc) - if len(data_dict["val_imgs"]) > 0: - if result_dict["pred_val_imgs"] is None: - print("No NV results found.") - continue - mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict) - mpsnr_all.append(mpsnr) - mssim_all.append(mssim) - mlpips_all.append(mlpips) - - print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}") - print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}") - print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}") - print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}") - print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}") - print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}") - print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}") - print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}") - print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}") diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__init__.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/configs.cpython-310.pyc deleted file mode 100644 index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/init_utils.cpython-310.pyc deleted file mode 100644 index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/loss_utils.cpython-310.pyc deleted file mode 100644 index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/metrics.cpython-310.pyc deleted file mode 100644 index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000 Binary files 
a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/params.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/configs.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig 
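- # Learning rates for the shared motion-basis rotations and translations.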
- motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float = 0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__init__.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git 
a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 index 0e12be124c07bc40149b0b8bcafd1413e94f8fd0..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/colmap.cpython-310.pyc deleted file mode 100644 index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc deleted file mode 100644 index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000 Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/base_dataset.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... - - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ... 
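- # train_collate_fn below keeps the track-related keys as per-sample lists: they hold a varying number of points per sample and cannot be stacked by default_collate.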
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/casual_dataset.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/casual_dataset.py deleted file mode 100644 index e378c6d2408c8ae5d2cdaf45cf6f77dead5c7bd2..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything" #"aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything", #"aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - 
self.depth_type = depth_type - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return 
self.depths[index] / self.scale - - def load_image(self, index) -> torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, 
dtype=torch.float32), - torch.arange(0, H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/colmap.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = 
{v.name: k for k, v in images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
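- Example: read_next_bytes(fid, 24, "iiQQ") unpacks two int32 and two uint64 values (4 + 4 + 8 + 8 = 24 bytes).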
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/iphone_dataset.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - 
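-        # scene_norm_dict is normally computed on the train split and passed
-        # in for val, so both splits share one normalization (see below).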
self.scene_norm_dict = scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
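-                # Non-lidar maps appear to be stored as inverse depth: values
-                # are clamped to >= 1e-3 and inverted, capping depth at 1e3.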
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
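-                # The transform below maps world -> normalized coordinates:
-                #   1. center on the mean of the subsampled fg track points;
-                #   2. scale so the robust (5%-95%) extent spans ~2 units;
-                #   3. rotate the mean camera up-axis onto world +z via the
-                #      axis-angle rotation between the two directions.
-                # rt_to_mat4(R, -R @ center) packs this into a 4x4 [R|t].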
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
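-        # Each track file stores an (N, 4) array per (query, target) frame
-        # pair: x, y in pixels plus TAPIR's occlusion and expected-distance
-        # logits, decoded below by parse_tapir_track_info.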
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
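-            # grid_sample expects coords in [-1, 1] (hence normalize_coords);
-            # padding_mode="border" clamps samples that land off-image.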
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
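-            # Scalar string; the shape comments below annotate each tensor.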
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
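-        # Keypoints are annotated at 2x downsampling, hence the 2.0 / factor
-        # rescale below; filenames encode the time id as 0_<time_id>.json.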
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
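-            # An empty TensorDataset yields a zero-length val loader, so the
-            # validation loop is a no-op for sequences without a val split.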
- self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/utils.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. 
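-    A point counts as visible when
-    (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5,
-    i.e. both heads must be confident the point is on-screen and accurate.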
-
-    Args:
-        occlusions: [-inf, inf], np.float32
-        expected_dist: [-inf, inf], np.float32
-
-    Returns:
-        visibles: bool
-    """
-
-    def sigmoid(x):
-        if isinstance(x, np.ndarray):
-            return 1 / (1 + np.exp(-x))
-        else:
-            return torch.sigmoid(x)
-
-    visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5
-    return visibles
-
-
-def parse_tapir_track_info(occlusions, expected_dist):
-    """
-    return:
-        valid_visible: mask of visible & confident points
-        valid_invisible: mask of invisible & confident points
-        confidence: clamped confidence scores (all < 0.5 -> 0)
-    """
-    visibility = 1 - F.sigmoid(occlusions)
-    confidence = 1 - F.sigmoid(expected_dist)
-    valid_visible = visibility * confidence > 0.5
-    valid_invisible = (1 - visibility) * confidence > 0.5
-    # set all confidence < 0.5 to 0
-    confidence = confidence * (valid_visible | valid_invisible).float()
-    return valid_visible, valid_invisible, confidence
-
-
-def get_tracks_3d_for_query_frame(
-    query_index: int,
-    query_img: torch.Tensor,
-    tracks_2d: torch.Tensor,
-    depths: torch.Tensor,
-    masks: torch.Tensor,
-    inv_Ks: torch.Tensor,
-    c2ws: torch.Tensor,
-):
-    """
-    :param query_index (int)
-    :param query_img [H, W, 3]
-    :param tracks_2d [N, T, 4]
-    :param depths [T, H, W]
-    :param masks [T, H, W]
-    :param inv_Ks [T, 3, 3]
-    :param c2ws [T, 4, 4]
-    returns (
-        tracks_3d [N, T, 3]
-        track_colors [N, 3]
-        visibles [N, T]
-        invisibles [N, T]
-        confidences [N, T]
-    )
-    """
-    T, H, W = depths.shape
-    query_img = query_img[None].permute(0, 3, 1, 2)  # (1, 3, H, W)
-    tracks_2d = tracks_2d.swapaxes(0, 1)  # (T, N, 4)
-    tracks_2d, occs, dists = (
-        tracks_2d[..., :2],
-        tracks_2d[..., 2],
-        tracks_2d[..., 3],
-    )
-    # visibles = postprocess_occlusions(occs, dists)
-    # (T, N), (T, N), (T, N)
-    visibles, invisibles, confidences = parse_tapir_track_info(occs, dists)
-    # Unproject 2D tracks to 3D.
-    # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N)
-    track_depths = F.grid_sample(
-        depths[:, None],
-        normalize_coords(tracks_2d[:, None], H, W),
-        align_corners=True,
-        padding_mode="border",
-    )[:, 0, 0]
-    tracks_3d = (
-        torch.einsum(
-            "nij,npj->npi",
-            inv_Ks,
-            F.pad(tracks_2d, (0, 1), value=1.0),
-        )
-        * track_depths[..., None]
-    )
-    tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[
-        ..., :3
-    ]
-    # Filter out out-of-mask tracks.
-    # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N)
-    is_in_masks = (
-        F.grid_sample(
-            masks[:, None],
-            normalize_coords(tracks_2d[:, None], H, W),
-            align_corners=True,
-        )[:, 0, 0]
-        == 1
-    )
-    visibles *= is_in_masks
-    invisibles *= is_in_masks
-    confidences *= is_in_masks.float()
-
-    # valid if in the fg mask at least 40% of the time
-    # in_mask_counts = is_in_masks.sum(0)
-    # t = 0.25
-    # thresh = min(t * T, in_mask_counts.float().quantile(t).item())
-    # valid = in_mask_counts > thresh
-    valid = is_in_masks[query_index]
-    # valid if visible 5% of the time
-    visible_counts = visibles.sum(0)
-    valid = valid & (
-        visible_counts
-        >= min(
-            int(0.05 * T),
-            visible_counts.float().quantile(0.1).item(),
-        )
-    )
-
-    # Get track's color from the query frame.
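-    # Colors are read at the query frame only; the validity filter above keeps
-    # tracks that are in-mask at the query frame and visible in at least
-    # min(5% of frames, the 10th-percentile visible count).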
- # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. 
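-    Each output channel is one-hot over a single window position; conv2d with
-    this kernel therefore gathers each pixel's neighborhood onto the channel dim.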
- If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. -def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] 
- ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/init_utils.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes -from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - -def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor -) -> GaussianParams: - """ - using dataclasses individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. 
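-    # Opacities live in logit space, so sigmoid(opacities) starts at 0.7.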
- opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - -def init_bg( - points: StaticObservations, -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkdg_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkdg_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals. - local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, 
init_ts = [], []
-
-    if rot_type == "quat":
-        id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device)
-        rot_dim = 4
-    else:
-        id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device)
-        rot_dim = 6
-
-    init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1)
-    init_ts = torch.zeros(num_bases, num_frames, 3, device=device)
-    errs_before = np.full((num_bases, num_frames), -1.0)
-    errs_after = np.full((num_bases, num_frames), -1.0)
-
-    tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames))
-    print(f"{tgt_ts=}")
-    skipped_ts = {}
-    for n, cluster_id in enumerate(ids):
-        mask_in_cluster = labels == cluster_id
-        cluster = tracks_3d.xyz[mask_in_cluster].transpose(
-            0, 1
-        )  # [num_frames, n_pts, 3]
-        visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes(
-            0, 1
-        )  # [num_frames, n_pts]
-        confidences = tracks_3d.confidences[mask_in_cluster].swapaxes(
-            0, 1
-        )  # [num_frames, n_pts]
-        weights = get_weights_for_procrustes(cluster, visibilities)
-        prev_t = cano_t
-        cluster_skip_ts = []
-        for cur_t in tgt_ts:
-            # compute pairwise transform from cano_t
-            procrustes_weights = (
-                weights[cano_t]
-                * weights[cur_t]
-                * (confidences[cano_t] + confidences[cur_t])
-                / 2
-            )
-            if procrustes_weights.sum() < min_mean_weight * num_frames:
-                init_rots[n, cur_t] = init_rots[n, prev_t]
-                init_ts[n, cur_t] = init_ts[n, prev_t]
-                cluster_skip_ts.append(cur_t)
-            else:
-                se3, (err, err_before) = solve_procrustes(
-                    cluster[cano_t],
-                    cluster[cur_t],
-                    weights=procrustes_weights,
-                    enforce_se3=True,
-                    rot_type=rot_type,
-                )
-                init_rot, init_t, _ = se3
-                assert init_rot.shape[-1] == rot_dim
-                # double cover
-                if rot_type == "quat" and torch.linalg.norm(
-                    init_rot - init_rots[n][prev_t]
-                ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]):
-                    init_rot = -init_rot
-                init_rots[n, cur_t] = init_rot
-                init_ts[n, cur_t] = init_t
-                if np.isnan(err):
-                    print(f"{cur_t=} {err=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                if np.isnan(err_before):
-                    print(f"{cur_t=} {err_before=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                errs_after[n, cur_t] = err
-                errs_before[n, cur_t] = err_before
-                prev_t = cur_t
-        skipped_ts[cluster_id.item()] = cluster_skip_ts
-
-    guru.info(f"{skipped_ts=}")
-    guru.info(
-        "procrustes init median error: {:.5f} => {:.5f}".format(
-            np.median(errs_before[errs_before > 0]),
-            np.median(errs_after[errs_after > 0]),
-        )
-    )
-    guru.info(
-        "procrustes init mean error: {:.5f} => {:.5f}".format(
-            np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0])
-        )
-    )
-    guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}")
-
-    if vis:
-        server = get_server(port)
-        center_idcs = torch.argmin(dists2centers, dim=0)
-        print(f"{dists2centers.shape=} {center_idcs.shape=}")
-        vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs])
-        vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks")
-        import ipdb
-
-        ipdb.set_trace()
-
-    bases = MotionBases(init_rots, init_ts)
-    return bases, motion_coefs, tracks_3d
-
-
-def run_initial_optim(
-    fg: GaussianParams,
-    bases: MotionBases,
-    tracks_3d: TrackObservations,
-    Ks: torch.Tensor,
-    w2cs: torch.Tensor,
-    num_iters: int = 1000,
-    use_depth_range_loss: bool = False,
-):
-    """
-    :param motion_rots: [num_bases, num_frames, 4|6]
-    :param motion_transls: [num_bases, num_frames, 3]
-    :param motion_coefs: [num_bases, num_frames]
-    :param means: [num_gaussians, 3]
-    """
-    optimizer = torch.optim.Adam(
-        [
-            {"params":
bases.params["rots"], "lr": 1e-2}, - {"params": bases.params["transls"], "lr": 3e-2}, - {"params": fg.params["motion_coefs"], "lr": 1e-2}, - {"params": fg.params["means"], "lr": 1e-3}, - ], - ) - scheduler = torch.optim.lr_scheduler.ExponentialLR( - optimizer, gamma=0.1 ** (1 / num_iters) - ) - G = fg.params.means.shape[0] - num_frames = bases.num_frames - device = bases.params["rots"].device - - w_smooth_func = lambda i, min_v, max_v, th: ( - min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v - ) - - gt_2d, gt_depth = project_2d_tracks( - tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - # (G, T, 2) - gt_2d = gt_2d.swapaxes(0, 1) - # (G, T) - gt_depth = gt_depth.swapaxes(0, 1) - - ts = torch.arange(0, num_frames, device=device) - ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1)) # i (3B,) - - pbar = tqdm(range(0, num_iters)) - for i in pbar: - coefs = fg.get_coefs() - transfms = bases.compute_transforms(ts, coefs) - positions = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. 
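-        # w_smooth_func ramps linearly once past iteration th: with
-        # (min_v, max_v, th) = (0.01, 0.1, 400) and num_iters = 1000,
-        # step 700 gives 0.01 + (0.1 - 0.01) * 300 / 600 = 0.055.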
- w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: 
ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / (cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, 
first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/loss_utils.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss 
< loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - # acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/metrics.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/metrics.py +++ /dev/null @@ -1,313 +0,0 @@ 
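The trimmed losses in loss_utils.py above make the photometric and track fits robust to outliers by discarding the highest-error residuals before averaging. A minimal self-contained sketch of the idea (hypothetical tensors, not part of the repo; quantile 0.9 drops the worst 10% of per-element errors before taking the mean):

import torch
import torch.nn.functional as F

pred = torch.randn(1000, 3)
gt = pred + 0.01 * torch.randn(1000, 3)
gt[:50] += 10.0  # corrupt 5% of the elements with gross outliers

loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1)  # per-element error, shape (1000,)
cutoff = torch.quantile(loss, 0.9)
trimmed = loss[loss < cutoff].mean()  # robust mean: the outliers are excluded
naive = loss.mean()                   # dominated by the 50 corrupted elements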
-from typing import Literal
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from torchmetrics.functional.image.lpips import _NoTrainLpips
-from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure
-from torchmetrics.metric import Metric
-from torchmetrics.utilities import dim_zero_cat
-from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE
-
-
-def compute_psnr(
-    preds: torch.Tensor,
-    targets: torch.Tensor,
-    masks: torch.Tensor | None = None,
-) -> float:
-    """
-    Args:
-        preds (torch.Tensor): (..., 3) predicted images in [0, 1].
-        targets (torch.Tensor): (..., 3) target images in [0, 1].
-        masks (torch.Tensor | None): (...,) optional binary masks where the
-            1-regions will be taken into account.
-
-    Returns:
-        psnr (float): Peak signal-to-noise ratio.
-    """
-    if masks is None:
-        masks = torch.ones_like(preds[..., 0])
-    return (
-        -10.0
-        * torch.log(
-            F.mse_loss(
-                preds * masks[..., None],
-                targets * masks[..., None],
-                reduction="sum",
-            )
-            / masks.sum().clamp(min=1.0)
-            / 3.0
-        )
-        / np.log(10.0)
-    ).item()
-
-
-def compute_pose_errors(
-    preds: torch.Tensor, targets: torch.Tensor
-) -> tuple[float, float, float]:
-    """
-    Args:
-        preds: (N, 4, 4) predicted camera poses.
-        targets: (N, 4, 4) target camera poses.
-
-    Returns:
-        ate (float): Absolute trajectory error.
-        rpe_t (float): Relative pose error in translation.
-        rpe_r (float): Relative pose error in rotation (degree).
-    """
-    # Compute ATE.
-    ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item()
-    # Compute RPE_t and RPE_r.
-    # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r.
-    # torch has numerical issues with acos when the value is close to 1.0, i.e.
-    # when RPE_r is supposed to be very small, which results in an artificially
-    # large error.
-    preds = preds.detach().cpu().numpy()
-    targets = targets.detach().cpu().numpy()
-    pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:]
-    target_rels = np.linalg.inv(targets[:-1]) @ targets[1:]
-    error_rels = np.linalg.inv(target_rels) @ pred_rels
-    traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1)
-    rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item()
-    rpe_r = (
-        np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item()
-        / np.pi
-        * 180.0
-    )
-    return ate, rpe_t, rpe_r
-
-
-class mPSNR(PeakSignalNoiseRatio):
-    sum_squared_error: list[torch.Tensor]
-    total: list[torch.Tensor]
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            data_range=1.0,
-            base=10.0,
-            dim=None,
-            reduction="elementwise_mean",
-            **kwargs,
-        )
-        self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat")
-        self.add_state("total", default=[], dist_reduce_fx="cat")
-
-    def __len__(self) -> int:
-        return len(self.total)
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (..., 3) float32 predicted images.
-            targets (torch.Tensor): (..., 3) float32 target images.
-            masks (torch.Tensor | None): (...,) optional binary masks where the
-                1-regions will be taken into account.
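        A minimal call pattern (illustrative only; preds, targets, and masks
        are hypothetical tensors shaped as documented above):

            metric = mPSNR()
            metric.update(preds, targets, masks)  # accumulate SSE and pixel count
            value = metric.compute()  # -10 * log10(accumulated SSE / count)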
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - self.sum_squared_error.append( - torch.sum(torch.pow((preds - targets) * masks[..., None], 2)) - ) - self.total.append(masks.sum().to(torch.int64) * 3) - - def compute(self) -> torch.Tensor: - """Compute peak signal-to-noise ratio over state.""" - sum_squared_error = dim_zero_cat(self.sum_squared_error) - total = dim_zero_cat(self.total) - return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0) - - -class mSSIM(StructuralSimilarityIndexMeasure): - similarity: list - - def __init__(self, **kwargs) -> None: - super().__init__( - reduction=None, - data_range=1.0, - return_full_image=False, - **kwargs, - ) - assert isinstance(self.sigma, float) - - def __len__(self) -> int: - return sum([s.shape[0] for s in self.similarity]) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional binary masks where - the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - - # Construct a 1D Gaussian blur filter. - assert isinstance(self.kernel_size, int) - hw = self.kernel_size // 2 - shift = (2 * hw - self.kernel_size + 1) / 2 - assert isinstance(self.sigma, float) - f_i = ( - (torch.arange(self.kernel_size, device=preds.device) - hw + shift) - / self.sigma - ) ** 2 - filt = torch.exp(-0.5 * f_i) - filt /= torch.sum(filt) - - # Blur in x and y (faster than the 2D convolution). - def convolve2d(z, m, f): - # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf). - z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. 
- # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. 
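        The decision rule is a thresholded L2 distance; one update effectively
        contributes (sketch, using the same hypothetical tensors):

            correct = (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum()
            total = preds.shape[0]
            # compute() then averages correct / total over all updates.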
- """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/params.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - 
self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] == 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/renderer.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. 
- work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/scene_model.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def __init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return 
self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). 
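        # (When target_ts is given, the per-target 3D positions are packed into
        # extra "color" channels further below, so the rasterizer alpha-blends
        # 3D tracks exactly like RGB; the channels are split back out of the
        # rendered image at the end of this function.)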
- target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. 
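            # retain_grad() is needed because means2d is a non-leaf tensor:
            # without it, autograd would discard its .grad after backward(),
            # and the densification statistics could not read the screen-space
            # gradients.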
- self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/tensor_dataclass.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
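                # Rebuild the dict by recursing into every value, so nested
                # containers of tensors are mapped as well.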
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/trainer.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
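        # (The track projection above is the standard pinhole mapping: for a
        # camera-space point x, u = (K @ x)[:2] / x_z, with the depth clamped
        # at 1e-6 to guard the division for points at or behind the camera.)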
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
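        # (compute_z_acc_loss penalizes the squared velocity component along
        # the viewing ray for both neighboring frame pairs, i.e.
        # ((x_t - x_{t-1}) . d)^2 + ((x_{t+1} - x_t) . d)^2, discouraging
        # depth drift that the image-space losses cannot observe.)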
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
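        # (Below, the gradients of the projected centers are rescaled by W/2
        # and H/2 so the densification threshold is expressed in roughly pixel
        # units, following the 3DGS-style screen-space criterion; the
        # batch_size factor presumably compensates for the per-batch averaging
        # in the loss.)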
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/trajectories.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1)
-    y = torch.cross(z, x, dim=-1)  # (3)
-    avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center)
-    avg_w2c = torch.linalg.inv(avg_c2w)
-    return avg_w2c
-
-
-def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor:
-    """Triangulate a set of rays to find a single lookat point.
-
-    Args:
-        origins (torch.Tensor): A (N, 3) array of ray origins.
-        viewdirs (torch.Tensor): A (N, 3) array of ray view directions.
-
-    Returns:
-        torch.Tensor: A (3,) lookat point.
-    """
-
-    viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1)
-    eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None]
-    # Calculate projection matrix I - rr^T
-    I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :])
-    # Compute sum of projections
-    sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3)
-    # Solve for the intersection point using least squares
-    lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0]
-    # Check NaNs.
-    assert not torch.any(torch.isnan(lookat))
-    return lookat
-
-
-def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor):
-    """
-    Args:
-        positions: (N, 3) tensor of camera positions
-        lookat: (3,) tensor of lookat point
-        up: (3,) tensor of up vector
-
-    Returns:
-        w2cs: (N, 4, 4) tensor of world-to-camera transforms
-    """
-    forward_vectors = F.normalize(lookat - positions, dim=-1)
-    right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1)
-    down_vectors = F.normalize(
-        torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1
-    )
-    Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1)
-    w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions))
-    return w2cs
-
-
-def get_arc_w2cs(
-    ref_w2c: torch.Tensor,
-    lookat: torch.Tensor,
-    up: torch.Tensor,
-    num_frames: int,
-    degree: float,
-    **_,
-) -> torch.Tensor:
-    ref_position = torch.linalg.inv(ref_w2c)[:3, 3]
-    thetas = (
-        torch.sin(
-            torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[
-                :-1
-            ]
-        )
-        * (degree / 2.0)
-        / 180.0
-        * torch.pi
-    )
-    positions = torch.einsum(
-        "nij,j->ni",
-        roma.rotvec_to_rotmat(thetas[:, None] * up[None]),
-        ref_position - lookat,
-    )
-    return get_lookat_w2cs(positions, lookat, up)
-
-
-def get_lemniscate_w2cs(
-    ref_w2c: torch.Tensor,
-    lookat: torch.Tensor,
-    up: torch.Tensor,
-    num_frames: int,
-    degree: float,
-    **_,
-) -> torch.Tensor:
-    ref_c2w = torch.linalg.inv(ref_w2c)
-    a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi)
-    # Lemniscate curve in camera space. Starting at the origin.
-    thetas = (
-        torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1]
-        + torch.pi / 2
-    )
-    positions = torch.stack(
-        [
-            a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2),
-            a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2),
-            torch.zeros(num_frames, device=ref_w2c.device),
-        ],
-        dim=-1,
-    )
-    # Transform to world space.
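-    # F.pad(..., value=1.0) appends a homogeneous 1 to each point, so the
-    # single einsum with the (3, 4) top block of ref_c2w applies rotation and
-    # translation in one step.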
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/transforms.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transfm to be SE3. - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - q (torch.Tensor): (4,), rotation component in quaternion of WXYZ - format. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (torch.Tensor): (), average L2 distance after alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error. 
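-    # Sanity check: apply the recovered similarity transform to src and
-    # compare the weighted residual against the pre-alignment residual; both
-    # are returned alongside sim3 so callers can confirm alignment helped.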
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/validator.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
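-            # PCK counts a keypoint as correct when the predicted 2D location
-            # lands within 5% of the longer image side of the ground truth.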
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
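-            # The colormap far plane below uses the 99th percentile instead of
-            # the true max so a few outlier depths do not wash out the video.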
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
-            t = batch["ts"][0]
-            # (4, 4).
-            w2c = batch["w2cs"][0]
-            # (3, 3).
-            K = batch["Ks"][0]
-            # (H, W, 3).
-            img = batch["imgs"][0]
-            img_wh = img.shape[-2::-1]
-            rendered = self.model.render(
-                t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors
-            )
-            # Putting results onto CPU since they would otherwise consume
-            # unnecessarily large GPU memory for long sequences.
-            video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu())
-        video = torch.stack(video, dim=0)
-        iio.mimwrite(
-            osp.join(video_dir, "motion_coefs.mp4"),
-            make_video_divisble((video.numpy() * 255).astype(np.uint8)),
-            fps=fps,
-        )
diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__init__.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000
Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc
deleted file mode 100644
index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000
Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ
diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__pycache__/render_panel.cpython-310.pyc
deleted file mode 100644
index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000
Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ
diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__pycache__/utils.cpython-310.pyc
deleted file mode 100644
index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000
Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__pycache__/viewer.cpython-310.pyc
deleted file mode 100644
index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000
Binary files a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ
diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/playback_panel.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/playback_panel.py
deleted file mode 100644
index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000
--- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/playback_panel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import threading
-import time
-
-import viser
-
-
-def add_gui_playback_group(
-    server: viser.ViserServer,
- 
num_frames: int, - min_fps: float = 1.0, - max_fps: float = 60.0, - fps_step: float = 0.1, - initial_fps: float = 10.0, -): - gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/render_panel.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
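A minimal usage sketch for the playback group defined above, assuming the module is importable as flow3d.vis.playback_panel and that the caller swaps per-frame scene content inside the slider callback (both are assumptions, not part of the deleted file):

import time

import viser

from flow3d.vis.playback_panel import add_gui_playback_group  # assumed import path

server = viser.ViserServer()
num_frames = 120  # hypothetical sequence length
gui_timestep, *_ = add_gui_playback_group(server, num_frames=num_frames)

@gui_timestep.on_update
def _(_) -> None:
    # Fires for both manual scrubbing and the auto-play thread; swap in the
    # scene nodes for the new frame here.
    print(f"showing frame {gui_timestep.value}")

while True:
    time.sleep(1.0)  # keep the server alive

The auto-play thread advances the slider itself, so a caller only ever reacts to slider updates; note the slider is created disabled and is only re-enabled while playback is paused.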
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
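-                    # tf.SE3.exp(T_current_target.log() * j / 9.0) moves a
-                    # geodesic fraction of the way toward the target pose, so
-                    # the ten steps form a smooth fly-to animation.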
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
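-        # The interpolator maps cumulative transition seconds to fractional
-        # keyframe indices; PCHIP keeps this mapping monotone so playback
-        # never runs backwards between keyframes.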
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
-        Returns current camera pose + FOV if available."""
-
-        if preview_frame_slider is None:
-            return
-        maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad(
-            preview_frame_slider.value / get_max_frame_index()
-        )
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-        render_tab_state.preview_fov = fov_rad
-        render_tab_state.preview_aspect = camera_path.get_aspect()
-        render_tab_state.preview_camera_type = camera_type.value
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-        return pose, fov_rad, time
-
-    def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]:
-        """Helper for creating the current frame # slider. This is removed and
-        re-added anytime the `max` value changes."""
-
-        with playback_folder:
-            preview_frame_slider = server.gui.add_slider(
-                "Preview frame",
-                min=0,
-                max=get_max_frame_index(),
-                step=1,
-                initial_value=0,
-                # Place right after the pause button.
-                order=preview_render_stop_button.order + 0.01,
-                disabled=get_max_frame_index() == 1,
-            )
-            play_button.disabled = preview_frame_slider.disabled
-            preview_render_button.disabled = preview_frame_slider.disabled
-
-            @preview_frame_slider.on_update
-            def _(_) -> None:
-                nonlocal preview_camera_handle
-                maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-                if maybe_pose_and_fov_rad_and_time is None:
-                    return
-                pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-
-                preview_camera_handle = server.scene.add_camera_frustum(
-                    "/preview_camera",
-                    fov=fov_rad,
-                    aspect=resolution.value[0] / resolution.value[1],
-                    scale=0.35,
-                    wxyz=pose.rotation().wxyz,
-                    position=pose.translation(),
-                    color=(10, 200, 30),
-                )
-                if render_tab_state.preview_render:
-                    for client in server.get_clients().values():
-                        client.camera.wxyz = pose.rotation().wxyz
-                        client.camera.position = pose.translation()
-                if gui_timestep_handle is not None:
-                    gui_timestep_handle.value = int(time)
-
-        return preview_frame_slider
-
-    # We back up the camera poses before and after we start previewing renders.
-    camera_pose_backup_from_id: Dict[int, tuple] = {}
-
-    @preview_render_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = True
-        preview_render_button.visible = False
-        preview_render_stop_button.visible = True
-
-        maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov, time = maybe_pose_and_fov_rad_and_time
-        del fov
-
-        # Hide all scene nodes when we're previewing the render.
-        server.scene.set_global_visibility(False)
-
-        # Back up and then set camera poses.
-        for client in server.get_clients().values():
-            camera_pose_backup_from_id[client.client_id] = (
-                client.camera.position,
-                client.camera.look_at,
-                client.camera.up_direction,
-            )
-            client.camera.wxyz = pose.rotation().wxyz
-            client.camera.position = pose.translation()
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-
-    @preview_render_stop_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = False
-        preview_render_button.visible = True
-        preview_render_stop_button.visible = False
-
-        # Revert camera poses.
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera() # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play).start() - - # Pause playback when the pause button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # Add a button for loading an existing camera path. - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - # List paths from the same directory that "Save Camera Path" writes to. - camera_path_dir = datapath - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # Load the JSON file. - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # Apply a 180-degree rotation about the x-axis. - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_fov values will not match exactly.
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # Update the render name. - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # Set the initial value to the current date-time string. - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # The JSON data has the properties: - # keyframes: list of keyframes with - # matrix: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # time: float - # w2c: flattened 4x4 world-to-camera matrix - # K: flattened 3x3 intrinsics - # img_wh: (render_width, render_height) - # First populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # Now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/utils.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, 
occ, radius=radius) - out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
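Each point is triangulated as a fan: num_segments rim vertices around the point plus one center vertex, one triangle per segment.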
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/viewer.py b/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/dance-jump/code/2024-10-26-011904/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/dance-jump/code/2024-10-26-011904/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/code/2024-10-26-011904/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/dance-jump/code/2024-10-26-011904/scripts/evaluate_iphone.py b/som_out/dance-jump/code/2024-10-26-011904/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- 
a/som_out/dance-jump/code/2024-10-26-011904/scripts/evaluate_iphone.py +++ /dev/null @@ -1,447 +0,0 @@ -import argparse -import json -import os.path as osp -from glob import glob -from itertools import product - -import cv2 -import imageio.v3 as iio -import numpy as np -import roma -import torch -from tqdm import tqdm - -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.metrics import mLPIPS, mPSNR, mSSIM -from flow3d.transforms import rt_to_mat4, solve_procrustes - -parser = argparse.ArgumentParser() -parser.add_argument( - "--data_dir", - type=str, - help="Path to the data directory that contains all the sequences.", -) -parser.add_argument( - "--result_dir", - type=str, - help="Path to the result directory that contains the results. " - "For batch evaluation, result_dir should contain subdirectories for each sequence (result_dir/seq_name/results); " - "for single-sequence evaluation, result_dir should contain results directly (result_dir/results).", -) -parser.add_argument( - "--seq_names", - type=str, - nargs="+", - default=[ - "apple", - "backpack", - "block", - "creeper", - "handwavy", - "haru-sit", - "mochi-high-five", - "paper-windmill", - "pillow", - "spin", - "sriracha-tree", - "teddy", - ], - help="Sequence names to evaluate.", -) -args = parser.parse_args() - - -def load_data_dict(data_dir, train_names, val_names): - val_imgs = np.array( - [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names] - ) - val_covisibles = np.array( - [ - iio.imread( - osp.join( - data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png" - ) - ) - for name in tqdm(val_names, desc="Loading val covisibles") - ] - ) - train_depths = np.array( - [ - np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0] - for name in train_names - ] - ) - train_Ks, train_w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [name + ".png" for name in train_names], - ) - train_Ks = train_Ks[:, :3, :3] - scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item() - train_c2ws = np.linalg.inv(train_w2cs) - train_c2ws[:, :3, -1] *= scale - train_w2cs = np.linalg.inv(train_c2ws) - keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json"))) - keypoints_2d = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints_2d.append(json.load(f)) - keypoints_2d = np.array(keypoints_2d) - keypoints_2d[..., :2] *= 2.0 - time_ids = np.array( - [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths] - ) - time_pairs = np.array(list(product(time_ids, repeat=2))) - index_pairs = np.array(list(product(range(len(time_ids)), repeat=2))) - keypoints_3d = [] - for i, kps_2d in zip(time_ids, keypoints_2d): - K = train_Ks[i] - w2c = train_w2cs[i] - depth = train_depths[i] - is_kp_visible = kps_2d[:, 2] == 1 - is_depth_valid = ( - cv2.remap( - (depth != 0).astype(np.float32), - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - == 1 - ) - kp_depths = cv2.remap( - depth, # type: ignore - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - kps_3d = ( - np.einsum( - "ij,pj->pi", - np.linalg.inv(K), - np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1), - ) - * kp_depths[:, None] - ) - kps_3d = np.einsum( - "ij,pj->pi", - np.linalg.inv(w2c)[:3], - np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1), - ) - kps_3d = 
np.concatenate( - [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1 - ) - kps_3d[kps_3d[:, -1] != 1] = 0.0 - keypoints_3d.append(kps_3d) - keypoints_3d = np.array(keypoints_3d) - return { - "val_imgs": val_imgs, - "val_covisibles": val_covisibles, - "train_depths": train_depths, - "train_Ks": train_Ks, - "train_w2cs": train_w2cs, - "keypoints_2d": keypoints_2d, - "keypoints_3d": keypoints_3d, - "time_ids": time_ids, - "time_pairs": time_pairs, - "index_pairs": index_pairs, - } - - -def load_result_dict(result_dir, val_names): - try: - pred_val_imgs = np.array( - [ - iio.imread(osp.join(result_dir, "rgb", f"{name}.png")) - for name in val_names - ] - ) - except Exception: - pred_val_imgs = None - try: - keypoints_dict = np.load( - osp.join(result_dir, "keypoints.npz"), allow_pickle=True - ) - if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict: - keypoints_dict = keypoints_dict["arr_0"].item() - pred_keypoint_Ks = keypoints_dict["Ks"] - pred_keypoint_w2cs = keypoints_dict["w2cs"] - pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"] - pred_train_depths = keypoints_dict["pred_train_depths"] - except Exception: - print( - "No keypoints.npz found; make sure this is because the method itself cannot produce keypoints." - ) - keypoints_dict = {} - pred_keypoint_Ks = None - pred_keypoint_w2cs = None - pred_keypoints_3d = None - pred_train_depths = None - - if "visibilities" in keypoints_dict: - pred_visibilities = keypoints_dict["visibilities"] - else: - pred_visibilities = None - - return { - "pred_val_imgs": pred_val_imgs, - "pred_train_depths": pred_train_depths, - "pred_keypoint_Ks": pred_keypoint_Ks, - "pred_keypoint_w2cs": pred_keypoint_w2cs, - "pred_keypoints_3d": pred_keypoints_3d, - "pred_visibilities": pred_visibilities, - } - - -def evaluate_3d_tracking(data_dict, result_dict): - train_Ks = data_dict["train_Ks"] - train_w2cs = data_dict["train_w2cs"] - keypoints_3d = data_dict["keypoints_3d"] - time_ids = data_dict["time_ids"] - time_pairs = data_dict["time_pairs"] - index_pairs = data_dict["index_pairs"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"] - if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks): - print("Inconsistent camera intrinsics.") - print(train_Ks[time_ids][0], pred_keypoint_Ks[0]) - keypoint_w2cs = train_w2cs[time_ids] - q, t, s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0] - R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1)) - pred_keypoints_3d = np.einsum( - "ij,...j->...i", - rt_to_mat4(R, t, s).numpy().astype(np.float64), - np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1), - ) - pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:] - # Compute 3D tracking metrics.
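# EPE below is the mean Euclidean error over keypoints that are covisible in both frames
# of each pair, after the Procrustes alignment above maps predictions into the ground-truth
# (metric) coordinate frame; the PCK-3D numbers are the fraction of those errors under
# 10cm / 5cm, averaged over frame pairs.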
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
None].repeat(num_pts, axis=-1) - evaluation_points = one_hot_eye == 0 - for i in range(num_frames): - evaluation_points[i, :, ~visibilities[i]] = False - occ_acc = np.sum( - np.equal(pred_visibilities, target_visibilities) & evaluation_points - ) / np.sum(evaluation_points) - all_frac_within = [] - all_jaccard = [] - - for thresh in [4, 8, 16, 32, 64]: - within_dist = np.sum( - np.square(pred_points - target_points), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, target_visibilities) - count_correct = np.sum(is_correct & evaluation_points) - count_visible_points = np.sum(target_visibilities & evaluation_points) - frac_correct = count_correct / count_visible_points - all_frac_within.append(frac_correct) - - true_positives = np.sum(is_correct & pred_visibilities & evaluation_points) - gt_positives = np.sum(target_visibilities & evaluation_points) - false_positives = (~target_visibilities) & pred_visibilities - false_positives = false_positives | ((~within_dist) & pred_visibilities) - false_positives = np.sum(false_positives & evaluation_points) - jaccard = true_positives / (gt_positives + false_positives) - all_jaccard.append(jaccard) - AJ = np.mean(all_jaccard) - APCK = np.mean(all_frac_within) - - print(f"2D tracking AJ: {AJ:.4f}") - print(f"2D tracking avg PCK: {APCK:.4f}") - print(f"2D tracking occlusion accuracy: {occ_acc:.4f}") - print("-----------------------------") - return AJ, APCK, occ_acc - - -def evaluate_nv(data_dict, result_dict): - device = "cuda" - psnr_metric = mPSNR().to(device) - ssim_metric = mSSIM().to(device) - lpips_metric = mLPIPS().to(device) - - val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device) - val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device) - pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device) - - for i in range(len(val_imgs)): - val_img = val_imgs[i] / 255.0 - pred_val_img = pred_val_imgs[i] / 255.0 - val_covisible = val_covisibles[i] / 255.0 - psnr_metric.update(val_img, pred_val_img, val_covisible) - ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - mpsnr = psnr_metric.compute().item() - mssim = ssim_metric.compute().item() - mlpips = lpips_metric.compute().item() - print(f"NV mPSNR: {mpsnr:.4f}") - print(f"NV mSSIM: {mssim:.4f}") - print(f"NV mLPIPS: {mlpips:.4f}") - return mpsnr, mssim, mlpips - - -if __name__ == "__main__": - seq_names = args.seq_names - - epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], [] - AJ_all, APCK_all, occ_acc_all = [], [], [] - mpsnr_all, mssim_all, mlpips_all = [], [], [] - - for seq_name in seq_names: - print("=========================================") - print(f"Evaluating {seq_name}") - print("=========================================") - data_dir = osp.join(args.data_dir, seq_name) - if not osp.exists(data_dir): - data_dir = args.data_dir - if not osp.exists(data_dir): - raise ValueError(f"Data directory {data_dir} not found.") - result_dir = osp.join(args.result_dir, seq_name, "results/") - if not osp.exists(result_dir): - result_dir = osp.join(args.result_dir, "results/") - if not osp.exists(result_dir): - raise ValueError(f"Result directory {result_dir} not found.") - - with open(osp.join(data_dir, "splits/train.json")) as f: - train_names = json.load(f)["frame_names"] - with open(osp.join(data_dir, "splits/val.json")) as f: - val_names = json.load(f)["frame_names"] - - data_dict = load_data_dict(data_dir, train_names, 
val_names) - result_dict = load_result_dict(result_dir, val_names) - if result_dict["pred_keypoints_3d"] is not None: - epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict) - AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict) - epe_all.append(epe) - pck_3d_10cm_all.append(pck_3d_10cm) - pck_3d_5cm_all.append(pck_3d_5cm) - AJ_all.append(AJ) - APCK_all.append(APCK) - occ_acc_all.append(occ_acc) - if len(data_dict["val_imgs"]) > 0: - if result_dict["pred_val_imgs"] is None: - print("No NV results found.") - continue - mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict) - mpsnr_all.append(mpsnr) - mssim_all.append(mssim) - mlpips_all.append(mlpips) - - print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}") - print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}") - print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}") - print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}") - print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}") - print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}") - print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}") - print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}") - print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}") diff --git a/som_out/dance-jump/events.out.tfevents.1729876571.cvsv00-140.1387060.0 b/som_out/dance-jump/events.out.tfevents.1729876571.cvsv00-140.1387060.0 deleted file mode 100644 index 67fc1727623b9538e9f817d454452fc3f8234f0d..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/events.out.tfevents.1729876571.cvsv00-140.1387060.0 +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8aa210bbc2800a6d8eb369d301a4f61dad2381f4056d4c69b411d02a5d7c5b34 -size 291846 diff --git a/som_out/dance-jump/events.out.tfevents.1729876749.cvsv00-140.1397717.0 b/som_out/dance-jump/events.out.tfevents.1729876749.cvsv00-140.1397717.0 deleted file mode 100644 index 8fa31ab65585b36b9be2ae5a1fcaa5bc8ed132b4..0000000000000000000000000000000000000000 --- a/som_out/dance-jump/events.out.tfevents.1729876749.cvsv00-140.1397717.0 +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5140e41981c403dc9f0105cd6393ec1959e21a57529bfa907a6bb95f0e6b48c2 -size 3682424 diff --git a/som_out/horsejump-high/cfg.yaml b/som_out/horsejump-high/cfg.yaml deleted file mode 100644 index 7aad1069fe78382d92ef5e966e8aab7c07acea7d..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/cfg.yaml +++ /dev/null @@ -1,68 +0,0 @@ -batch_size: 8 -data: - camera_type: droid_recon - depth_type: aligned_depth_anything - end: -1 - image_type: JPEGImages - load_from_cache: false - mask_erosion_radius: 3 - mask_type: Annotations - num_targets_per_frame: 4 - res: 480p - root_dir: SOM_data_lcx/Davis_Data - scene_norm_dict: null - seq_name: horsejump-high - start: 0 - track_2d_type: bootstapir -loss: - w_depth_const: 0.1 - w_depth_grad: 1 - w_depth_reg: 0.5 - w_mask: 1.0 - w_rgb: 1.0 - w_scale_var: 0.01 - w_smooth_bases: 0.1 - w_smooth_tracks: 2.0 - w_track: 2.0 - w_z_accel: 1.0 -lr: - bg: - colors: 0.01 - means: 0.00016 - opacities: 0.05 - quats: 0.001 - scales: 0.005 - fg: - colors: 0.01 - means: 0.00016 - motion_coefs: 0.01 - opacities: 0.01 - quats: 0.001 - scales: 0.005 - motion_bases: - rots: 0.00016 - transls: 0.00016 -num_bg: 100000 -num_dl_workers: 4 -num_epochs: 500 -num_fg: 40000 -num_motion_bases: 10 -optim: - control_every: 100 - cull_opacity_threshold: 0.1 - cull_scale_threshold: 0.5 - cull_screen_threshold: 0.15 - 
densify_scale_threshold: 0.01 - densify_screen_threshold: 0.05 - densify_xys_grad_threshold: 0.0002 - max_steps: 5000 - reset_opacity_every_n_controls: 30 - stop_control_by_screen_steps: 4000 - stop_control_steps: 4000 - stop_densify_steps: 15000 - warmup_steps: 200 -port: null -save_videos_every: 50 -validate_every: 50 -vis_debug: false -work_dir: SOM_output_lcx/Davis_out/horsejump-high diff --git a/som_out/horsejump-high/checkpoints/last.ckpt b/som_out/horsejump-high/checkpoints/last.ckpt deleted file mode 100644 index 3194fd2ede4f4ca3278429570c61aef69192b6bd..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/checkpoints/last.ckpt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:bb00b754730b8c4211e2844d5b89da552b4b2cbc3abbb79d5f479b28dbbb2453 -size 72007272 diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__init__.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/configs.cpython-310.pyc deleted file mode 100644 index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/init_utils.cpython-310.pyc deleted file mode 100644 index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/loss_utils.cpython-310.pyc deleted file mode 100644 index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/metrics.cpython-310.pyc deleted file mode 100644 index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/params.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index 
da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/configs.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig - motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float = 0.1 
- w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__init__.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 
index 0e12be124c07bc40149b0b8bcafd1413e94f8fd0..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/colmap.cpython-310.pyc deleted file mode 100644 index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc deleted file mode 100644 index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/base_dataset.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... - - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ... 
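# A minimal sketch (hypothetical, for illustration only) of a subclass conforming to this
# interface, assuming frames are preloaded as tensors; names below are made up:
#
#   class ToyDataset(BaseDataset):
#       def __init__(self, imgs):  # imgs: (T, H, W, 3) float tensor
#           self.imgs = imgs
#       @property
#       def num_frames(self) -> int:
#           return len(self.imgs)
#       def get_image(self, index: int) -> torch.Tensor:
#           return self.imgs[index]
#       # ...plus get_w2cs/get_Ks/get_depth/get_mask/get_tracks_3d/get_bkgd_points.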
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/casual_dataset.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/casual_dataset.py deleted file mode 100644 index e378c6d2408c8ae5d2cdaf45cf6f77dead5c7bd2..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything" #"aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything", #"aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - 
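# Editor's note (added): the constructor assumes a per-sequence directory
# layout of {root_dir}/{image_type}/{res}/{seq_name}, built just below; e.g.
# a hypothetical CasualDataset("bear", "datasets/DAVIS") would read images
# from datasets/DAVIS/JPEGImages/480p/bear/.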
self.res = res - self.depth_type = depth_type - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - 
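# Editor's note (added): depths are cached unscaled; the division by
# self.scale below keeps them consistent with the normalized cameras, whose
# translations are divided by the same scene scale in __init__.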
return self.depths[index] / self.scale - - def load_image(self, index) -> torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, 
dtype=torch.float32), - torch.arange(0, H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/colmap.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - 
colmap_image_idcs = {v.name: k for k, v in images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
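    Example (editor's addition): read_next_bytes(fid, num_bytes=24,
    format_char_sequence="iiQQ") reads 24 bytes and unpacks two int32 values
    followed by two uint64 values, which is exactly the camera header layout
    consumed in read_cameras_binary below.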
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
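# Editor's note (added): image names are stored null-terminated in the
# binary format, so the loop below accumulates one byte at a time until the
# b"\x00" sentinel is reached.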
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/iphone_dataset.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - 
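# Editor's note (added): num_targets_per_frame is the number of target
# frames randomly drawn per training query frame when building the 2D-track
# supervision in __getitem__ below.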
self.scene_norm_dict = scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
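# Editor's note (added): the arrays loaded below are treated as
# disparity-style values; load_depth clamps them to >= 1e-3 and then inverts
# (depth = 1 / pred) before returning.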
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
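# Editor's note (added): the normalization below (mirroring
# compute_scene_norm in casual_dataset.py) centers the foreground tracks at
# their mean, takes half of the largest 5%-95% quantile extent as the scene
# scale, and rotates the mean camera "up" vector onto +z with an axis-angle
# rotation: axis = normalize(up x z), angle = arccos(up . z),
# R = rotvec_to_rotmat(axis * angle).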
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
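# Editor's note (added): the loop below spreads num_samples across the
# sampled query frames: every frame gets floor(num_samples / F) tracks and
# the last frame absorbs the remainder, so the total equals num_samples
# whenever each frame has enough candidate tracks.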
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
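# Editor's note (added): F.grid_sample expects coordinates in [-1, 1], which
# is what normalize_coords provides; sampling the query image at the
# query-frame track locations below yields one RGB color per track.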
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
- self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/utils.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. 
- - Args: - occlusions: [-inf, inf], np.float32 - expected_dist: [-inf, inf], np.float32 - - Returns: - visibles: bool - """ - - def sigmoid(x): - if isinstance(x, np.ndarray): - return 1 / (1 + np.exp(-x)) - else: - return torch.sigmoid(x) - - visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5 - return visibles - - -def parse_tapir_track_info(occlusions, expected_dist): - """ - return: - valid_visible: mask of visible & confident points - valid_invisible: mask of invisible & confident points - confidence: clamped confidence scores (all < 0.5 -> 0) - """ - visibility = 1 - F.sigmoid(occlusions) - confidence = 1 - F.sigmoid(expected_dist) - valid_visible = visibility * confidence > 0.5 - valid_invisible = (1 - visibility) * confidence > 0.5 - # set all confidence < 0.5 to 0 - confidence = confidence * (valid_visible | valid_invisible).float() - return valid_visible, valid_invisible, confidence - - -def get_tracks_3d_for_query_frame( - query_index: int, - query_img: torch.Tensor, - tracks_2d: torch.Tensor, - depths: torch.Tensor, - masks: torch.Tensor, - inv_Ks: torch.Tensor, - c2ws: torch.Tensor, -): - """ - :param query_index (int) - :param query_img [H, W, 3] - :param tracks_2d [N, T, 4] - :param depths [T, H, W] - :param masks [T, H, W] - :param inv_Ks [T, 3, 3] - :param c2ws [T, 4, 4] - returns ( - tracks_3d [N, T, 3] - track_colors [N, 3] - visibles [N, T] - invisibles [N, T] - confidences [N, T] - ) - """ - T, H, W = depths.shape - query_img = query_img[None].permute(0, 3, 1, 2) # (1, 3, H, W) - tracks_2d = tracks_2d.swapaxes(0, 1) # (T, N, 4) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - # (T, N), (T, N), (T, N) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - track_depths = F.grid_sample( - depths[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths[..., None] - ) - tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[ - ..., :3 - ] - # Filter out out-of-mask tracks. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - is_in_masks = ( - F.grid_sample( - masks[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - )[:, 0, 0] - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - - # valid if in the fg mask in the query frame - # in_mask_counts = is_in_masks.sum(0) - # t = 0.25 - # thresh = min(t * T, in_mask_counts.float().quantile(t).item()) - # valid = in_mask_counts > thresh - valid = is_in_masks[query_index] - # valid if visible at least 5% of the time - visible_counts = visibles.sum(0) - valid = valid & ( - visible_counts - >= min( - int(0.05 * T), - visible_counts.float().quantile(0.1).item(), - ) - ) - - # Get track's color from the query frame. 
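# Editor's note (added; refers to parse_tapir_track_info above): a quick
# sanity check on hypothetical raw TAPIR logits:
#
#   occ  = torch.tensor([-5.0, 5.0])    # strongly visible / strongly occluded
#   dist = torch.tensor([-5.0, -5.0])   # both confident
#   vis, invis, conf = parse_tapir_track_info(occ, dist)
#   # vis  -> [True, False], invis -> [False, True]
#   # conf -> roughly [0.99, 0.99]; points failing both 0.5 product
#   # thresholds would have had their confidence zeroed out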
- # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. 
- If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. -def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] 
- ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/init_utils.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes -from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - -def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor -) -> GaussianParams: - """ - using dataclasses individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. 
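[The scale initialization above stores log-scales because `GaussianParams` applies `torch.exp` as the scale activation. A standalone sketch of the same kNN heuristic; the repo's own `knn` helper (shown later in this diff, in loss_utils.py) wraps sklearn's NearestNeighbors in the same way:]

import torch
from sklearn.neighbors import NearestNeighbors

def init_log_scales(points: torch.Tensor, k: int = 3) -> torch.Tensor:
    # Mean distance to the k nearest neighbors (excluding self), clamped
    # to the 5th/95th percentiles, log-ed so exp(scales) is metric size.
    nn = NearestNeighbors(n_neighbors=k + 1).fit(points.cpu().numpy())
    dists, _ = nn.kneighbors(points.cpu().numpy())
    d = torch.from_numpy(dists[:, 1:]).float().mean(dim=-1, keepdim=True)
    d = d.clamp(torch.quantile(d, 0.05), torch.quantile(d, 0.95))
    return torch.log(d.repeat(1, 3))

print(init_log_scales(torch.rand(100, 3)).shape)  # torch.Size([100, 3])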
- opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - -def init_bg( - points: StaticObservations, -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkdg_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkdg_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals. - local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, 
init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t - procrustes_weights = ( - weights[cano_t] - * weights[cur_t] - * (confidences[cano_t] + confidences[cur_t]) - / 2 - ) - if procrustes_weights.sum() < min_mean_weight * num_frames: - init_rots[n, cur_t] = init_rots[n, prev_t] - init_ts[n, cur_t] = init_ts[n, prev_t] - cluster_skip_ts.append(cur_t) - else: - se3, (err, err_before) = solve_procrustes( - cluster[cano_t], - cluster[cur_t], - weights=procrustes_weights, - enforce_se3=True, - rot_type=rot_type, - ) - init_rot, init_t, _ = se3 - assert init_rot.shape[-1] == rot_dim - # double cover - if rot_type == "quat" and torch.linalg.norm( - init_rot - init_rots[n][prev_t] - ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]): - init_rot = -init_rot - init_rots[n, cur_t] = init_rot - init_ts[n, cur_t] = init_t - if err == np.nan: - print(f"{cur_t=} {err=}") - print(f"{procrustes_weights.isnan().sum()=}") - if err_before == np.nan: - print(f"{cur_t=} {err_before=}") - print(f"{procrustes_weights.isnan().sum()=}") - errs_after[n, cur_t] = err - errs_before[n, cur_t] = err_before - prev_t = cur_t - skipped_ts[cluster_id.item()] = cluster_skip_ts - - guru.info(f"{skipped_ts=}") - guru.info( - "procrustes init median error: {:.5f} => {:.5f}".format( - np.median(errs_before[errs_before > 0]), - np.median(errs_after[errs_after > 0]), - ) - ) - guru.info( - "procrustes init mean error: {:.5f} => {:.5f}".format( - np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0]) - ) - ) - guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}") - - if vis: - server = get_server(port) - center_idcs = torch.argmin(dists2centers, dim=0) - print(f"{dists2centers.shape=} {center_idcs.shape=}") - vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs]) - vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks") - import ipdb - - ipdb.set_trace() - - bases = MotionBases(init_rots, init_ts) - return bases, motion_coefs, tracks_3d - - -def run_initial_optim( - fg: GaussianParams, - bases: MotionBases, - tracks_3d: TrackObservations, - Ks: torch.Tensor, - w2cs: torch.Tensor, - num_iters: int = 1000, - use_depth_range_loss: bool = False, -): - """ - :param motion_rots: [num_bases, num_frames, 4|6] - :param motion_transls: [num_bases, num_frames, 3] - :param motion_coefs: [num_bases, num_frames] - :param means: [num_gaussians, 3] - """ - optimizer = torch.optim.Adam( - [ - {"params": 
bases.params["rots"], "lr": 1e-2}, - {"params": bases.params["transls"], "lr": 3e-2}, - {"params": fg.params["motion_coefs"], "lr": 1e-2}, - {"params": fg.params["means"], "lr": 1e-3}, - ], - ) - scheduler = torch.optim.lr_scheduler.ExponentialLR( - optimizer, gamma=0.1 ** (1 / num_iters) - ) - G = fg.params.means.shape[0] - num_frames = bases.num_frames - device = bases.params["rots"].device - - w_smooth_func = lambda i, min_v, max_v, th: ( - min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v - ) - - gt_2d, gt_depth = project_2d_tracks( - tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - # (G, T, 2) - gt_2d = gt_2d.swapaxes(0, 1) - # (G, T) - gt_depth = gt_depth.swapaxes(0, 1) - - ts = torch.arange(0, num_frames, device=device) - ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1)) # i (3B,) - - pbar = tqdm(range(0, num_iters)) - for i in pbar: - coefs = fg.get_coefs() - transfms = bases.compute_transforms(ts, coefs) - positions = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. 
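[One bug worth flagging in `init_motion_params_with_procrustes` above: `if err == np.nan:` is always false, because NaN compares unequal to everything including itself; the working test is `np.isnan(err)`. Separately, the `w_smooth_func` lambda above is a flat-then-linear warmup for loss weights; a standalone sketch of the same schedule:]

import numpy as np

def w_smooth_func(i, min_v, max_v, th, num_iters=1000):
    # Constant at min_v until iteration th, then ramps linearly so that
    # the weight reaches max_v at num_iters.
    if i <= th:
        return min_v
    return (max_v - min_v) * (i - th) / (num_iters - th) + min_v

print(w_smooth_func(0, 0.01, 0.1, 400))     # 0.01
print(w_smooth_func(700, 0.01, 0.1, 400))   # 0.055
print(w_smooth_func(1000, 0.01, 0.1, 400))  # 0.1
print(np.nan == np.nan, np.isnan(np.nan))   # False True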
- w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: 
ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / (cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, 
first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/loss_utils.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss 
= loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - # acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/metrics.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/metrics.py +++ 
/dev/null @@ -1,313 +0,0 @@ -from typing import Literal - -import numpy as np -import torch -import torch.nn.functional as F -from torchmetrics.functional.image.lpips import _NoTrainLpips -from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure -from torchmetrics.metric import Metric -from torchmetrics.utilities import dim_zero_cat -from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE - - -def compute_psnr( - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, -) -> float: - """ - Args: - preds (torch.Tensor): (..., 3) predicted images in [0, 1]. - targets (torch.Tensor): (..., 3) target images in [0, 1]. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - - Returns: - psnr (float): Peak signal-to-noise ratio. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - return ( - -10.0 - * torch.log( - F.mse_loss( - preds * masks[..., None], - targets * masks[..., None], - reduction="sum", - ) - / masks.sum().clamp(min=1.0) - / 3.0 - ) - / np.log(10.0) - ).item() - - -def compute_pose_errors( - preds: torch.Tensor, targets: torch.Tensor -) -> tuple[float, float, float]: - """ - Args: - preds: (N, 4, 4) predicted camera poses. - targets: (N, 4, 4) target camera poses. - - Returns: - ate (float): Absolute trajectory error. - rpe_t (float): Relative pose error in translation. - rpe_r (float): Relative pose error in rotation (degree). - """ - # Compute ATE. - ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item() - # Compute RPE_t and RPE_r. - # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r. - # torch has numerical issues for acos when the value is close to 1.0, i.e. - # RPE_r is supposed to be very small, and will result in artificially large - # error. - preds = preds.detach().cpu().numpy() - targets = targets.detach().cpu().numpy() - pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:] - pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:] - target_rels = np.linalg.inv(targets[:-1]) @ targets[1:] - error_rels = np.linalg.inv(target_rels) @ pred_rels - traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1) - rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item() - rpe_r = ( - np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item() - / np.pi - * 180.0 - ) - return ate, rpe_t, rpe_r - - -class mPSNR(PeakSignalNoiseRatio): - sum_squared_error: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__(self, **kwargs) -> None: - super().__init__( - data_range=1.0, - base=10.0, - dim=None, - reduction="elementwise_mean", - **kwargs, - ) - self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (..., 3) float32 predicted images. - targets (torch.Tensor): (..., 3) float32 target images. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. 
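[Editorial note: in `compute_pose_errors` above, the line `pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:]` appears twice in a row; the duplicate is harmless but should be dropped. A quick sanity check of the metric, assuming the function above is in scope — identical trajectories must give zero error:]

import torch

poses = torch.eye(4).repeat(5, 1, 1)
poses[:, :3, -1] = torch.linspace(0, 1, 5)[:, None]  # translate along a line
ate, rpe_t, rpe_r = compute_pose_errors(poses, poses.clone())
print(ate, rpe_t, rpe_r)  # 0.0 0.0 0.0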
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - self.sum_squared_error.append( - torch.sum(torch.pow((preds - targets) * masks[..., None], 2)) - ) - self.total.append(masks.sum().to(torch.int64) * 3) - - def compute(self) -> torch.Tensor: - """Compute peak signal-to-noise ratio over state.""" - sum_squared_error = dim_zero_cat(self.sum_squared_error) - total = dim_zero_cat(self.total) - return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0) - - -class mSSIM(StructuralSimilarityIndexMeasure): - similarity: list - - def __init__(self, **kwargs) -> None: - super().__init__( - reduction=None, - data_range=1.0, - return_full_image=False, - **kwargs, - ) - assert isinstance(self.sigma, float) - - def __len__(self) -> int: - return sum([s.shape[0] for s in self.similarity]) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional binary masks where - the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - - # Construct a 1D Gaussian blur filter. - assert isinstance(self.kernel_size, int) - hw = self.kernel_size // 2 - shift = (2 * hw - self.kernel_size + 1) / 2 - assert isinstance(self.sigma, float) - f_i = ( - (torch.arange(self.kernel_size, device=preds.device) - hw + shift) - / self.sigma - ) ** 2 - filt = torch.exp(-0.5 * f_i) - filt /= torch.sum(filt) - - # Blur in x and y (faster than the 2D convolution). - def convolve2d(z, m, f): - # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf). - z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. 
- # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. 
- """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/params.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new 
- self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] == 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/renderer.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. 
- work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/scene_model.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def __init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return 
self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). 
- target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. 
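[The render path above packs RGB (or override colors), an optional mask channel, and flattened per-pixel 3D track targets into a single color tensor, rasterizes once, then recovers each output by splitting channels according to `ds_expected`. The bookkeeping reduces to `torch.split`; a minimal sketch with hypothetical sizes:]

import torch

C, H, W, B = 1, 4, 4, 5
ds_expected = {"img": 3, "mask": 1, "tracks_3d": B * 3}
render_colors = torch.rand(C, H, W, sum(ds_expected.values()))
img, mask, tracks = torch.split(render_colors, list(ds_expected.values()), dim=-1)
tracks = tracks.reshape(C, H, W, B, 3)  # per-pixel 3D positions at B target times
print(img.shape, mask.shape, tracks.shape)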
- self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/tensor_dataclass.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
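[A usage sketch of the `TensorDataclass` machinery here, with the `TrackObservations` dataclass defined just below: every tensor field is indexed, filtered, and moved together, so the fields can never drift out of sync:]

import torch

obs = TrackObservations(
    xyz=torch.rand(8, 4, 3),                       # (N, T, 3) tracks
    visibles=torch.ones(8, 4, dtype=torch.bool),
    invisibles=torch.zeros(8, 4, dtype=torch.bool),
    confidences=torch.ones(8, 4),
    colors=torch.rand(8, 3),                       # (N, 3) per-track color
)
subset = obs.filter_valid(obs.confidences.mean(dim=1) > 0.5)
print(subset.xyz.shape[0] == subset.colors.shape[0])  # True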
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/trainer.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
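-        # Query every Gaussian's pose at all target timesteps in one batched
-        # call, then split the result back into per-frame chunks of size N.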
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
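-        # The 2D track losses are only supervised at the query pixels, which
-        # lie on the image grid; build a boolean mask selecting those pixels.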
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
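-        # Every scalar below is written to TensorBoard by log_dict() at the
-        # current global step.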
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
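-        # Accumulate per-gaussian screen-space gradient norms, visibility
-        # counts, and maximum radii across the batched renders; the densify
-        # and cull steps below consume these running statistics.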
-        if not (
-            self.model._current_radii is not None
-            and self.model._current_xys is not None
-        ):
-            guru.warning("Model not training, skipping control step preparation")
-            return False
-
-        batch_size = len(self._batched_xys)
-        # These quantities are per rendered view, with shapes (C, G, *), and
-        # must be aggregated over all views.
-        for _current_xys, _current_radii, _current_img_wh in zip(
-            self._batched_xys, self._batched_radii, self._batched_img_wh
-        ):
-            sel = _current_radii > 0
-            gidcs = torch.where(sel)[1]
-            # normalize grads to [-1, 1] screen space
-            xys_grad = _current_xys.grad.clone()
-            xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size
-            xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size
-            self.running_stats["xys_grad_norm_acc"].index_add_(
-                0, gidcs, xys_grad[sel].norm(dim=-1)
-            )
-            self.running_stats["vis_count"].index_add_(
-                0, gidcs, torch.ones_like(gidcs, dtype=torch.int64)
-            )
-            max_radii = torch.maximum(
-                self.running_stats["max_radii"].index_select(0, gidcs),
-                _current_radii[sel] / max(_current_img_wh),
-            )
-            # Must be the in-place index_put_; the out-of-place index_put
-            # returns a new tensor and would silently discard the update.
-            self.running_stats["max_radii"].index_put_((gidcs,), max_radii)
-        return True
-
-    @torch.no_grad()
-    def _densify_control_step(self, global_step):
-        assert (self.running_stats["vis_count"] > 0).any()
-
-        cfg = self.optim_cfg
-        xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[
-            "vis_count"
-        ].clamp_min(1)
-        is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold
-        # Split gaussians.
-        scales = self.model.get_scales_all()
-        is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold
-        if global_step < cfg.stop_control_by_screen_steps:
-            is_radius_too_big = (
-                self.running_stats["max_radii"] > cfg.densify_screen_threshold
-            )
-        else:
-            is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool)
-
-        should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big)
-        should_dup = is_grad_too_high & ~is_scale_too_big
-
-        num_fg = self.model.num_fg_gaussians
-        should_fg_split = should_split[:num_fg]
-        num_fg_splits = int(should_fg_split.sum().item())
-        should_fg_dup = should_dup[:num_fg]
-        num_fg_dups = int(should_fg_dup.sum().item())
-
-        should_bg_split = should_split[num_fg:]
-        num_bg_splits = int(should_bg_split.sum().item())
-        should_bg_dup = should_dup[num_fg:]
-        num_bg_dups = int(should_bg_dup.sum().item())
-
-        fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup)
-        for param_name, new_params in fg_param_map.items():
-            full_param_name = f"fg.params.{param_name}"
-            optimizer = self.optimizers[full_param_name]
-            dup_in_optim(
-                optimizer,
-                [new_params],
-                should_fg_split,
-                num_fg_splits * 2 + num_fg_dups,
-            )
-
-        if self.model.bg is not None:
-            bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup)
-            for param_name, new_params in bg_param_map.items():
-                full_param_name = f"bg.params.{param_name}"
-                optimizer = self.optimizers[full_param_name]
-                dup_in_optim(
-                    optimizer,
-                    [new_params],
-                    should_bg_split,
-                    num_bg_splits * 2 + num_bg_dups,
-                )
-
-        # update running stats
-        for k, v in self.running_stats.items():
-            v_fg, v_bg = v[:num_fg], v[num_fg:]
-            new_v = torch.cat(
-                [
-                    v_fg[~should_fg_split],
-                    v_fg[should_fg_dup],
-                    v_fg[should_fg_split].repeat(2),
-                    v_bg[~should_bg_split],
-                    v_bg[should_bg_dup],
-                    v_bg[should_bg_split].repeat(2),
-                ],
-                dim=0,
-            )
-            self.running_stats[k] = new_v
-        guru.info(
-            f"Split {should_split.sum().item()} gaussians, "
-            f"Duplicated {should_dup.sum().item()} gaussians, "
-            f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/trajectories.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1) - y = torch.cross(z, x, dim=-1) # (3) - avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center) - avg_w2c = torch.linalg.inv(avg_c2w) - return avg_w2c - - -def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor: - """Triangulate a set of rays to find a single lookat point. - - Args: - origins (torch.Tensor): A (N, 3) array of ray origins. - viewdirs (torch.Tensor): A (N, 3) array of ray view directions. - - Returns: - torch.Tensor: A (3,) lookat point. - """ - - viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1) - eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None] - # Calculate projection matrix I - rr^T - I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :]) - # Compute sum of projections - sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3) - # Solve for the intersection point using least squares - lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0] - # Check NaNs. - assert not torch.any(torch.isnan(lookat)) - return lookat - - -def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor): - """ - Args: - positions: (N, 3) tensor of camera positions - lookat: (3,) tensor of lookat point - up: (3,) tensor of up vector - - Returns: - w2cs: (N, 3, 3) tensor of world to camera rotation matrices - """ - forward_vectors = F.normalize(lookat - positions, dim=-1) - right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1) - down_vectors = F.normalize( - torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1 - ) - Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1) - w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions)) - return w2cs - - -def get_arc_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_position = torch.linalg.inv(ref_w2c)[:3, 3] - thetas = ( - torch.sin( - torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[ - :-1 - ] - ) - * (degree / 2.0) - / 180.0 - * torch.pi - ) - positions = torch.einsum( - "nij,j->ni", - roma.rotvec_to_rotmat(thetas[:, None] * up[None]), - ref_position - lookat, - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_lemniscate_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi) - # Lemniscate curve in camera space. Starting at the origin. - thetas = ( - torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1] - + torch.pi / 2 - ) - positions = torch.stack( - [ - a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2), - a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2), - torch.zeros(num_frames, device=ref_w2c.device), - ], - dim=-1, - ) - # Transform to world space. 
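-    # Lift the curve points to homogeneous coordinates and map them from the
-    # reference camera frame into world space via the top three rows of ref_c2w.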
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/transforms.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
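-            Optional scale; when provided, the bottom row stores 1 / s so the
-            matrix maps x to s * (R @ x + t) after the homogeneous divide.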
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transfm to be SE3. - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - q (torch.Tensor): (4,), rotation component in quaternion of WXYZ - format. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (torch.Tensor): (), average L2 distance after alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error. 
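-    # Apply the recovered sim(3) to src and report the weighted residuals
-    # against dst both before and after alignment.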
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/validator.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
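-            # PCK: a keypoint counts as correct if its predicted 2D location
-            # lands within 5% of the longer image side of its ground truth.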
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
-            t = batch["ts"][0]
-            # (4, 4).
-            w2c = batch["w2cs"][0]
-            # (3, 3).
-            K = batch["Ks"][0]
-            # (H, W, 3).
-            img = batch["imgs"][0]
-            img_wh = img.shape[-2::-1]
-            rendered = self.model.render(
-                t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors
-            )
-            # Putting results onto CPU since it will consume unnecessarily
-            # large GPU memory for long sequence OW.
-            video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu())
-        video = torch.stack(video, dim=0)
-        iio.mimwrite(
-            osp.join(video_dir, "motion_coefs.mp4"),
-            make_video_divisble((video.numpy() * 255).astype(np.uint8)),
-            fps=fps,
-        )
diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__init__.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000
Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc
deleted file mode 100644
index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000
Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ
diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__pycache__/render_panel.cpython-310.pyc
deleted file mode 100644
index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000
Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ
diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__pycache__/utils.cpython-310.pyc
deleted file mode 100644
index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000
Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__pycache__/viewer.cpython-310.pyc
deleted file mode 100644
index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000
Binary files a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ
diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/playback_panel.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/playback_panel.py
deleted file mode 100644
index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000
--- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/playback_panel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import threading
-import time
-
-import viser - - -def add_gui_playback_group( - server: viser.ViserServer, - num_frames: int, - min_fps: float = 1.0, - max_fps: float = 60.0, - fps_step: float = 0.1, - initial_fps: float = 10.0, -): - gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/render_panel.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
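-#
-# This module implements the camera-path render panel: keyframes are stored
-# as (Keyframe, frustum handle) pairs and interpolated with Kochanek-Bartels
-# splines (splines.KochanekBartels) for smooth playback.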
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
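-        # Passing an existing keyframe_index overwrites that keyframe's
-        # frustum in place rather than allocating a new slot.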
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
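# --- Illustrative aside, not part of the deleted file: the "Go to" handler
# above flies the camera along an SE(3) geodesic from the current pose T_wc to
# the target T_wt, T_j = T_wc @ exp(log(T_wc^-1 @ T_wt) * j / (n - 1)), so
# rotation and translation blend together. Sketch with viser.transforms:
import numpy as np
import viser.transforms as tf

T_wc = tf.SE3.from_rotation_and_translation(tf.SO3.identity(), np.zeros(3))
T_wt = tf.SE3.from_rotation_and_translation(
    tf.SO3.from_x_radians(0.5), np.array([1.0, 0.0, 0.0])
)
delta = (T_wc.inverse() @ T_wt).log()  # 6-vector tangent.
steps = [T_wc @ tf.SE3.exp(delta * j / 9.0) for j in range(10)]
assert np.allclose(steps[-1].as_matrix(), T_wt.as_matrix())
# Each step is applied inside client.atomic() below so viewers never see the
# orientation and position update out of sync: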
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
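# --- Illustrative aside, not part of the deleted file: PCHIP is
# shape-preserving, so with increasing cumulative transition times the
# seconds -> keyframe-index map is monotone and playback can never run
# backwards, however uneven the per-transition durations are. Minimal sketch:
import numpy as np
import scipy.interpolate

t_sec = np.array([0.0, 2.0, 2.5, 6.0])  # Cumulative transition times.
idx = np.arange(len(t_sec))  # Spline parameter: one unit per keyframe.
interp = scipy.interpolate.PchipInterpolator(x=t_sec, y=idx)
assert np.all(np.diff(interp(np.linspace(0.0, 6.0, 100))) >= 0.0)  # Monotone.
# Rounding can still push an evaluation slightly past the last index, which the
# clip below guards against: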
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
-        for node in self._spline_nodes:
-            node.remove()
-        self._spline_nodes.clear()
-
-        self._spline_nodes.append(
-            self._server.scene.add_spline_catmull_rom(
-                "/render_camera_spline",
-                positions=points_array,
-                color=(220, 220, 220),
-                closed=self.loop,
-                line_width=1.0,
-                segments=points_array.shape[0] + 1,
-            )
-        )
-        self._spline_nodes.append(
-            self._server.scene.add_point_cloud(
-                "/render_camera_spline/points",
-                points=points_array,
-                colors=colors_array,
-                point_size=0.04,
-            )
-        )
-
-        def make_transition_handle(i: int) -> None:
-            assert self._position_spline is not None
-            transition_pos = self._position_spline.evaluate(
-                float(
-                    self.spline_t_from_t_sec(
-                        (transition_times_cumsum[i] + transition_times_cumsum[i + 1])
-                        / 2.0,
-                    )
-                )
-            )
-            transition_sphere = self._server.scene.add_icosphere(
-                f"/render_camera_spline/transition_{i}",
-                radius=0.04,
-                color=(255, 0, 0),
-                position=transition_pos,
-            )
-            self._spline_nodes.append(transition_sphere)
-
-            @transition_sphere.on_click
-            def _(_) -> None:
-                server = self._server
-
-                if self._camera_edit_panel is not None:
-                    self._camera_edit_panel.remove()
-                    self._camera_edit_panel = None
-
-                keyframe_index = (i + 1) % len(self._keyframes)
-                keyframe = keyframes[keyframe_index][0]
-
-                with server.scene.add_3d_gui_container(
-                    "/camera_edit_panel",
-                    position=transition_pos,
-                ) as camera_edit_panel:
-                    self._camera_edit_panel = camera_edit_panel
-                    override_transition_enabled = server.gui.add_checkbox(
-                        "Override transition",
-                        initial_value=keyframe.override_transition_enabled,
-                    )
-                    override_transition_sec = server.gui.add_number(
-                        "Override transition (sec)",
-                        initial_value=(
-                            keyframe.override_transition_sec
-                            if keyframe.override_transition_sec is not None
-                            else self.default_transition_sec
-                        ),
-                        min=0.001,
-                        max=30.0,
-                        step=0.001,
-                        disabled=not override_transition_enabled.value,
-                    )
-                    close_button = server.gui.add_button("Close")
-
-                    @override_transition_enabled.on_update
-                    def _(_) -> None:
-                        keyframe.override_transition_enabled = (
-                            override_transition_enabled.value
-                        )
-                        override_transition_sec.disabled = (
-                            not override_transition_enabled.value
-                        )
-                        self._duration_element.value = self.compute_duration()
-
-                    @override_transition_sec.on_update
-                    def _(_) -> None:
-                        keyframe.override_transition_sec = override_transition_sec.value
-                        self._duration_element.value = self.compute_duration()
-
-                    @close_button.on_click
-                    def _(_) -> None:
-                        assert camera_edit_panel is not None
-                        camera_edit_panel.remove()
-                        self._camera_edit_panel = None
-
-        (num_transitions_plus_1,) = transition_times_cumsum.shape
-        for i in range(num_transitions_plus_1 - 1):
-            make_transition_handle(i)
-
-    def compute_duration(self) -> float:
-        """Compute the total duration of the trajectory."""
-        total = 0.0
-        for i, (keyframe, frustum) in enumerate(self._keyframes.values()):
-            if i == 0 and not self.loop:
-                continue
-            del frustum
-            total += (
-                keyframe.override_transition_sec
-                if keyframe.override_transition_enabled
-                and keyframe.override_transition_sec is not None
-                else self.default_transition_sec
-            )
-        return total
-
-    def compute_transition_times_cumsum(self) -> np.ndarray:
-        """Compute cumulative transition times between keyframes, in seconds."""
-        total = 0.0
-        out = [0.0]
-        for i, (keyframe, frustum) in enumerate(self._keyframes.values()):
-            if i == 0:
-                continue
-            del frustum
-            total += (
-                keyframe.override_transition_sec
-                if keyframe.override_transition_enabled
-                and keyframe.override_transition_sec is not None
-                else
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
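# --- Illustrative aside, not part of the deleted file: a Keyframe is a plain
# snapshot, with pose and FOV taken from the client camera, aspect from the GUI
# resolution (W / H), and all overrides initially disabled. Constructed by
# hand, a keyframe at the origin looks like:
import numpy as np

kf = Keyframe(
    time=0.0,
    position=np.zeros(3),
    wxyz=np.array([1.0, 0.0, 0.0, 0.0]),  # Identity rotation.
    override_fov_enabled=False,
    override_fov_rad=np.pi / 2.0,
    aspect=1920.0 / 1080.0,
    override_transition_enabled=False,
    override_transition_sec=None,
)
# Below, the timestep slider (when present) supplies the keyframe's time: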
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
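# --- Illustrative aside, not part of the deleted file: the helper above exists
# because Python closures capture variables by reference, so registering
# controls.on_update directly inside the loop below would leave every callback
# bound to the final (keyframe, controls) pair. The pitfall in miniature:
late = [lambda: i for i in range(3)]
assert [f() for f in late] == [2, 2, 2]  # All lambdas see the last i.
frozen = [(lambda j: (lambda: j))(i) for i in range(3)]
assert [f() for f in frozen] == [0, 1, 2]  # Each inner lambda froze its own j.
# Hence the loop below calls the helper once per keyframe: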
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
-        Returns current camera pose + FOV if available."""
-
-        if preview_frame_slider is None:
-            return
-        maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad(
-            preview_frame_slider.value / get_max_frame_index()
-        )
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-        render_tab_state.preview_fov = fov_rad
-        render_tab_state.preview_aspect = camera_path.get_aspect()
-        render_tab_state.preview_camera_type = camera_type.value
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-        return pose, fov_rad, time
-
-    def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]:
-        """Helper for creating the current frame # slider. This is removed and
-        re-added anytime the `max` value changes."""
-
-        with playback_folder:
-            preview_frame_slider = server.gui.add_slider(
-                "Preview frame",
-                min=0,
-                max=get_max_frame_index(),
-                step=1,
-                initial_value=0,
-                # Place right after the pause button.
-                order=preview_render_stop_button.order + 0.01,
-                disabled=get_max_frame_index() == 1,
-            )
-            play_button.disabled = preview_frame_slider.disabled
-            preview_render_button.disabled = preview_frame_slider.disabled
-
-        @preview_frame_slider.on_update
-        def _(_) -> None:
-            nonlocal preview_camera_handle
-            maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-            if maybe_pose_and_fov_rad_and_time is None:
-                return
-            pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-
-            preview_camera_handle = server.scene.add_camera_frustum(
-                "/preview_camera",
-                fov=fov_rad,
-                aspect=resolution.value[0] / resolution.value[1],
-                scale=0.35,
-                wxyz=pose.rotation().wxyz,
-                position=pose.translation(),
-                color=(10, 200, 30),
-            )
-            if render_tab_state.preview_render:
-                for client in server.get_clients().values():
-                    client.camera.wxyz = pose.rotation().wxyz
-                    client.camera.position = pose.translation()
-            if gui_timestep_handle is not None:
-                gui_timestep_handle.value = int(time)
-
-        return preview_frame_slider
-
-    # We back up the camera poses before we start previewing renders, and
-    # restore them when the preview stops.
-    camera_pose_backup_from_id: Dict[int, tuple] = {}
-
-    @preview_render_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = True
-        preview_render_button.visible = False
-        preview_render_stop_button.visible = True
-
-        maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov, time = maybe_pose_and_fov_rad_and_time
-        del fov
-
-        # Hide all scene nodes when we're previewing the render.
-        server.scene.set_global_visibility(False)
-
-        # Back up and then set camera poses.
-        for client in server.get_clients().values():
-            camera_pose_backup_from_id[client.client_id] = (
-                client.camera.position,
-                client.camera.look_at,
-                client.camera.up_direction,
-            )
-            client.camera.wxyz = pose.rotation().wxyz
-            client.camera.position = pose.translation()
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-
-    @preview_render_stop_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = False
-        preview_render_button.visible = True
-        preview_render_stop_button.visible = False
-
-        # Revert camera poses.
-        for client in server.get_clients().values():
-            if client.client_id not in camera_pose_backup_from_id:
-                continue
-            cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop(
-                client.client_id
-            )
-            client.camera.position = cam_position
-            client.camera.look_at = cam_look_at
-            client.camera.up_direction = cam_up
-            client.flush()
-
-        # Un-hide scene nodes.
-        server.scene.set_global_visibility(True)
-
-    preview_frame_slider = add_preview_frame_slider()
-
-    # Update the # of frames.
-    @duration_number.on_update
-    @framerate_number.on_update
-    def _(_) -> None:
-        remove_preview_camera()  # Will be re-added when slider is updated.
-
-        nonlocal preview_frame_slider
-        old = preview_frame_slider
-        assert old is not None
-
-        preview_frame_slider = add_preview_frame_slider()
-        if preview_frame_slider is not None:
-            old.remove()
-        else:
-            preview_frame_slider = old
-
-        camera_path.framerate = framerate_number.value
-        camera_path.update_spline()
-
-    # Play the camera trajectory when the play button is pressed.
-    @play_button.on_click
-    def _(_) -> None:
-        play_button.visible = False
-        pause_button.visible = True
-
-        def play() -> None:
-            while not play_button.visible:
-                max_frame = int(framerate_number.value * duration_number.value)
-                if max_frame > 0:
-                    assert preview_frame_slider is not None
-                    preview_frame_slider.value = (
-                        preview_frame_slider.value + 1
-                    ) % max_frame
-                time.sleep(1.0 / framerate_number.value)
-
-        threading.Thread(target=play).start()
-
-    # Pause playback when the pause button is pressed.
-    @pause_button.on_click
-    def _(_) -> None:
-        play_button.visible = True
-        pause_button.visible = False
-
-    # Add a button for loading an existing camera path.
-    load_camera_path_button = server.gui.add_button(
-        "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path."
-    )
-
-    @load_camera_path_button.on_click
-    def _(event: viser.GuiEvent) -> None:
-        assert event.client is not None
-        # Load from the same directory that "Save Camera Path" writes to.
-        camera_path_dir = datapath
-        camera_path_dir.mkdir(parents=True, exist_ok=True)
-        preexisting_camera_paths = list(camera_path_dir.glob("*.json"))
-        preexisting_camera_filenames = [p.name for p in preexisting_camera_paths]
-
-        with event.client.gui.add_modal("Load Path") as modal:
-            if len(preexisting_camera_filenames) == 0:
-                event.client.gui.add_markdown("No existing paths found")
-            else:
-                event.client.gui.add_markdown("Select existing camera path:")
-                camera_path_dropdown = event.client.gui.add_dropdown(
-                    label="Camera Path",
-                    options=[str(p) for p in preexisting_camera_filenames],
-                    initial_value=str(preexisting_camera_filenames[0]),
-                )
-                load_button = event.client.gui.add_button("Load")
-
-                @load_button.on_click
-                def _(_) -> None:
-                    # Load the JSON file.
-                    json_path = datapath / camera_path_dropdown.value
-                    with open(json_path, "r") as f:
-                        json_data = json.load(f)
-
-                    keyframes = json_data["keyframes"]
-                    camera_path.reset()
-                    for i in range(len(keyframes)):
-                        frame = keyframes[i]
-                        pose = tf.SE3.from_matrix(
-                            np.array(frame["matrix"]).reshape(4, 4)
-                        )
-                        # Apply the 180 degree rotation about the x axis.
-                        pose = tf.SE3.from_rotation_and_translation(
-                            pose.rotation() @ tf.SO3.from_x_radians(np.pi),
-                            pose.translation(),
-                        )
-
-                        camera_path.add_camera(
-                            Keyframe(
-                                frame["time"],
-                                position=pose.translation(),
-                                wxyz=pose.rotation().wxyz,
-                                # There are some floating point conversions between
-                                # degrees and radians, so the fov and default_fov
-                                # values will not be exactly matched.
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/utils.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, 
t], colors, occ, radius=radius) - out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/viewer.py b/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/horsejump-high/code/2024-10-26-011528/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/horsejump-high/code/2024-10-26-011528/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-011528/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/horsejump-high/code/2024-10-26-011528/scripts/evaluate_iphone.py b/som_out/horsejump-high/code/2024-10-26-011528/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- 
a/som_out/horsejump-high/code/2024-10-26-011528/scripts/evaluate_iphone.py +++ /dev/null @@ -1,447 +0,0 @@ -import argparse -import json -import os.path as osp -from glob import glob -from itertools import product - -import cv2 -import imageio.v3 as iio -import numpy as np -import roma -import torch -from tqdm import tqdm - -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.metrics import mLPIPS, mPSNR, mSSIM -from flow3d.transforms import rt_to_mat4, solve_procrustes - -parser = argparse.ArgumentParser() -parser.add_argument( - "--data_dir", - type=str, - help="Path to the data directory that contains all the sequences.", -) -parser.add_argument( - "--result_dir", - type=str, - help="Path to the result directory that contains the results." - "for batch evaluation, result_dir should contain subdirectories for each sequence. (result_dir/seq_name/results)" - "for single sequence evaluation, result_dir should contain results directly (result_dir/results)", -) -parser.add_argument( - "--seq_names", - type=str, - nargs="+", - default=[ - "apple", - "backpack", - "block", - "creeper", - "handwavy", - "haru-sit", - "mochi-high-five", - "paper-windmill", - "pillow", - "spin", - "sriracha-tree", - "teddy", - ], - help="Sequence names to evaluate.", -) -args = parser.parse_args() - - -def load_data_dict(data_dir, train_names, val_names): - val_imgs = np.array( - [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names] - ) - val_covisibles = np.array( - [ - iio.imread( - osp.join( - data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png" - ) - ) - for name in tqdm(val_names, desc="Loading val covisibles") - ] - ) - train_depths = np.array( - [ - np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0] - for name in train_names - ] - ) - train_Ks, train_w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [name + ".png" for name in train_names], - ) - train_Ks = train_Ks[:, :3, :3] - scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item() - train_c2ws = np.linalg.inv(train_w2cs) - train_c2ws[:, :3, -1] *= scale - train_w2cs = np.linalg.inv(train_c2ws) - keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json"))) - keypoints_2d = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints_2d.append(json.load(f)) - keypoints_2d = np.array(keypoints_2d) - keypoints_2d[..., :2] *= 2.0 - time_ids = np.array( - [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths] - ) - time_pairs = np.array(list(product(time_ids, repeat=2))) - index_pairs = np.array(list(product(range(len(time_ids)), repeat=2))) - keypoints_3d = [] - for i, kps_2d in zip(time_ids, keypoints_2d): - K = train_Ks[i] - w2c = train_w2cs[i] - depth = train_depths[i] - is_kp_visible = kps_2d[:, 2] == 1 - is_depth_valid = ( - cv2.remap( - (depth != 0).astype(np.float32), - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - == 1 - ) - kp_depths = cv2.remap( - depth, # type: ignore - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - kps_3d = ( - np.einsum( - "ij,pj->pi", - np.linalg.inv(K), - np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1), - ) - * kp_depths[:, None] - ) - kps_3d = np.einsum( - "ij,pj->pi", - np.linalg.inv(w2c)[:3], - np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1), - ) - kps_3d 
= np.concatenate( - [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1 - ) - kps_3d[kps_3d[:, -1] != 1] = 0.0 - keypoints_3d.append(kps_3d) - keypoints_3d = np.array(keypoints_3d) - return { - "val_imgs": val_imgs, - "val_covisibles": val_covisibles, - "train_depths": train_depths, - "train_Ks": train_Ks, - "train_w2cs": train_w2cs, - "keypoints_2d": keypoints_2d, - "keypoints_3d": keypoints_3d, - "time_ids": time_ids, - "time_pairs": time_pairs, - "index_pairs": index_pairs, - } - - -def load_result_dict(result_dir, val_names): - try: - pred_val_imgs = np.array( - [ - iio.imread(osp.join(result_dir, "rgb", f"{name}.png")) - for name in val_names - ] - ) - except: - pred_val_imgs = None - try: - keypoints_dict = np.load( - osp.join(result_dir, "keypoints.npz"), allow_pickle=True - ) - if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict: - keypoints_dict = keypoints_dict["arr_0"].item() - pred_keypoint_Ks = keypoints_dict["Ks"] - pred_keypoint_w2cs = keypoints_dict["w2cs"] - pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"] - pred_train_depths = keypoints_dict["pred_train_depths"] - except: - print( - "No keypoints.npz found, make sure that it's the method itself cannot produce keypoints." - ) - keypoints_dict = {} - pred_keypoint_Ks = None - pred_keypoint_w2cs = None - pred_keypoints_3d = None - pred_train_depths = None - - if "visibilities" in list(keypoints_dict.keys()): - pred_visibilities = keypoints_dict["visibilities"] - else: - pred_visibilities = None - - return { - "pred_val_imgs": pred_val_imgs, - "pred_train_depths": pred_train_depths, - "pred_keypoint_Ks": pred_keypoint_Ks, - "pred_keypoint_w2cs": pred_keypoint_w2cs, - "pred_keypoints_3d": pred_keypoints_3d, - "pred_visibilities": pred_visibilities, - } - - -def evaluate_3d_tracking(data_dict, result_dict): - train_Ks = data_dict["train_Ks"] - train_w2cs = data_dict["train_w2cs"] - keypoints_3d = data_dict["keypoints_3d"] - time_ids = data_dict["time_ids"] - time_pairs = data_dict["time_pairs"] - index_pairs = data_dict["index_pairs"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"] - if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks): - print("Inconsistent camera intrinsics.") - print(train_Ks[time_ids][0], pred_keypoint_Ks[0]) - keypoint_w2cs = train_w2cs[time_ids] - q, t, s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0] - R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1)) - pred_keypoints_3d = np.einsum( - "ij,...j->...i", - rt_to_mat4(R, t, s).numpy().astype(np.float64), - np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1), - ) - pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:] - # Compute 3D tracking metrics. 
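# --- Illustrative aside, not part of the deleted file: for every ordered pair
# of keypoint frames, EPE is the mean Euclidean distance between predicted and
# ground-truth 3D keypoints over covisible points, and PCK@d is the fraction of
# those distances below d meters. On a toy pair of points:
import numpy as np

pred = np.array([[0.0, 0.0, 0.00], [1.0, 1.0, 1.00]])
gt = np.array([[0.0, 0.0, 0.08], [1.0, 1.0, 1.00]])
dists = np.linalg.norm(pred - gt, axis=-1)  # Per-point errors: [0.08, 0.0].
print(dists.mean(), (dists < 0.1).mean(), (dists < 0.05).mean())  # EPE, PCK@10cm, PCK@5cm.
# The block below computes these per time pair, masked to covisible keypoints: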
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
None].repeat(num_pts, axis=-1) - evaluation_points = one_hot_eye == 0 - for i in range(num_frames): - evaluation_points[i, :, ~visibilities[i]] = False - occ_acc = np.sum( - np.equal(pred_visibilities, target_visibilities) & evaluation_points - ) / np.sum(evaluation_points) - all_frac_within = [] - all_jaccard = [] - - for thresh in [4, 8, 16, 32, 64]: - within_dist = np.sum( - np.square(pred_points - target_points), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, target_visibilities) - count_correct = np.sum(is_correct & evaluation_points) - count_visible_points = np.sum(target_visibilities & evaluation_points) - frac_correct = count_correct / count_visible_points - all_frac_within.append(frac_correct) - - true_positives = np.sum(is_correct & pred_visibilities & evaluation_points) - gt_positives = np.sum(target_visibilities & evaluation_points) - false_positives = (~target_visibilities) & pred_visibilities - false_positives = false_positives | ((~within_dist) & pred_visibilities) - false_positives = np.sum(false_positives & evaluation_points) - jaccard = true_positives / (gt_positives + false_positives) - all_jaccard.append(jaccard) - AJ = np.mean(all_jaccard) - APCK = np.mean(all_frac_within) - - print(f"2D tracking AJ: {AJ:.4f}") - print(f"2D tracking avg PCK: {APCK:.4f}") - print(f"2D tracking occlusion accuracy: {occ_acc:.4f}") - print("-----------------------------") - return AJ, APCK, occ_acc - - -def evaluate_nv(data_dict, result_dict): - device = "cuda" - psnr_metric = mPSNR().to(device) - ssim_metric = mSSIM().to(device) - lpips_metric = mLPIPS().to(device) - - val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device) - val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device) - pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device) - - for i in range(len(val_imgs)): - val_img = val_imgs[i] / 255.0 - pred_val_img = pred_val_imgs[i] / 255.0 - val_covisible = val_covisibles[i] / 255.0 - psnr_metric.update(val_img, pred_val_img, val_covisible) - ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - mpsnr = psnr_metric.compute().item() - mssim = ssim_metric.compute().item() - mlpips = lpips_metric.compute().item() - print(f"NV mPSNR: {mpsnr:.4f}") - print(f"NV mSSIM: {mssim:.4f}") - print(f"NV mLPIPS: {mlpips:.4f}") - return mpsnr, mssim, mlpips - - -if __name__ == "__main__": - seq_names = args.seq_names - - epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], [] - AJ_all, APCK_all, occ_acc_all = [], [], [] - mpsnr_all, mssim_all, mlpips_all = [], [], [] - - for seq_name in seq_names: - print("=========================================") - print(f"Evaluating {seq_name}") - print("=========================================") - data_dir = osp.join(args.data_dir, seq_name) - if not osp.exists(data_dir): - data_dir = args.data_dir - if not osp.exists(data_dir): - raise ValueError(f"Data directory {data_dir} not found.") - result_dir = osp.join(args.result_dir, seq_name, "results/") - if not osp.exists(result_dir): - result_dir = osp.join(args.result_dir, "results/") - if not osp.exists(result_dir): - raise ValueError(f"Result directory {result_dir} not found.") - - with open(osp.join(data_dir, "splits/train.json")) as f: - train_names = json.load(f)["frame_names"] - with open(osp.join(data_dir, "splits/val.json")) as f: - val_names = json.load(f)["frame_names"] - - data_dict = load_data_dict(data_dir, train_names, 
val_names) - result_dict = load_result_dict(result_dir, val_names) - if result_dict["pred_keypoints_3d"] is not None: - epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict) - AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict) - epe_all.append(epe) - pck_3d_10cm_all.append(pck_3d_10cm) - pck_3d_5cm_all.append(pck_3d_5cm) - AJ_all.append(AJ) - APCK_all.append(APCK) - occ_acc_all.append(occ_acc) - if len(data_dict["val_imgs"]) > 0: - if result_dict["pred_val_imgs"] is None: - print("No NV results found.") - continue - mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict) - mpsnr_all.append(mpsnr) - mssim_all.append(mssim) - mlpips_all.append(mlpips) - - print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}") - print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}") - print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}") - print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}") - print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}") - print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}") - print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}") - print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}") - print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}") diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__init__.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/configs.cpython-310.pyc deleted file mode 100644 index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/init_utils.cpython-310.pyc deleted file mode 100644 index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/loss_utils.cpython-310.pyc deleted file mode 100644 index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/metrics.cpython-310.pyc deleted file mode 100644 index 
601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/params.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/configs.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 
1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig - motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float = 0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__init__.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 
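Editor's aside between file entries: the learning-rate dataclasses in configs.py above compose into SceneLRConfig. A minimal sketch of instantiating them, assuming the flow3d package above is on the import path; the printed values come from the defaults hard-coded in the dataclasses.

from flow3d.configs import BGLRConfig, FGLRConfig, MotionLRConfig, SceneLRConfig

lr_cfg = SceneLRConfig(
    fg=FGLRConfig(),
    bg=BGLRConfig(),
    motion_bases=MotionLRConfig(),
)
print(lr_cfg.fg.means, lr_cfg.bg.opacities)  # 0.00016 0.05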
64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 index 0e12be124c07bc40149b0b8bcafd1413e94f8fd0..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/colmap.cpython-310.pyc deleted file mode 100644 index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc deleted file mode 100644 index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/base_dataset.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... 
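    # Editor's note (sketch, hypothetical consumer code): concrete subclasses
    # return a 5-tuple here, typically unpacked as
    #     tracks_3d, visibles, invisibles, confidences, colors = ds.get_tracks_3d(5000)
    # with tracks_3d of shape (N, T, 3), the three masks of shape (N, T), and
    # colors of shape (N, 3), matching the docstring above.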
- - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ... - - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/casual_dataset.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/casual_dataset.py deleted file mode 100644 index e378c6d2408c8ae5d2cdaf45cf6f77dead5c7bd2..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything" #"aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything", #"aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - 
mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = depth_type - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: 
- self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return self.depths[index] / self.scale - - def load_image(self, index) -> torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - 
min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, dtype=torch.float32), - torch.arange(0, H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/colmap.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - 
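    # Editor's note: COLMAP's images.bin is keyed by integer image id rather
    # than filename, so the inverted {name: id} map built below lets each
    # frame be matched to its registered camera by basename.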
colmap_image_idcs = {v.name: k for k, v in images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
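    Example (editor's sketch): read a little-endian uint64 count, then one
    binary camera-property record, as done throughout this file:
        num_cameras = read_next_bytes(fid, 8, "Q")[0]
        props = read_next_bytes(fid, num_bytes=24, format_char_sequence="iiQQ")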
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
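            # Editor's note: the image name is stored as a NUL-terminated
            # byte string in the binary format, hence the one-character-at-a-
            # time read below until the b"\x00" sentinel.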
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/iphone_dataset.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - 
self.scene_norm_dict = scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
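            # Editor's note: the depth .npy files loaded below evidently
            # store inverse depth; values are clamped away from zero and
            # inverted (depth = 1 / disp) before use.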
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
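                # Editor's note: the normalization below (i) centers the
                # subsampled foreground tracks at their mean, (ii) takes half
                # the largest 5th-95th percentile extent as the scene scale,
                # and (iii) rotates the mean camera up direction (the negated
                # y-row of w2c) onto world +z via an axis-angle vector, then
                # packs R and the re-centered translation into one 4x4 with
                # rt_to_mat4.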
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
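        # Editor's note: each 2D track .npy appears to hold a (P, 4) array
        # per (query, target) frame pair: pixel x, y, a TAPIR occlusion
        # score, and an expected-distance value; parse_tapir_track_info()
        # below turns the last two into visible/invisible/confidence masks.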
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
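The unprojection above does three things per track point: sample a depth with grid_sample, back-project the homogeneous pixel through the inverse intrinsics, and lift the camera-space point to world space with the camera-to-world matrix. A minimal single-frame version of that pipeline (the helper names and the random inputs are illustrative only):

import torch
import torch.nn.functional as F

def normalize_coords(coords, h, w):
    # Pixels -> [-1, 1], matching grid_sample(align_corners=True).
    return coords / coords.new_tensor([w - 1.0, h - 1.0]) * 2 - 1.0

def unproject_tracks(tracks_2d, depth, K, c2w):
    # tracks_2d: (P, 2) pixels; depth: (H, W); K: (3, 3); c2w: (4, 4).
    H, W = depth.shape
    grid = normalize_coords(tracks_2d, H, W)[None, None]   # (1, 1, P, 2)
    d = F.grid_sample(depth[None, None], grid, align_corners=True,
                      padding_mode="border")[0, 0, 0]      # (P,) sampled depths
    pix_h = F.pad(tracks_2d, (0, 1), value=1.0)            # homogeneous pixels
    pts_cam = torch.einsum("ij,pj->pi", torch.linalg.inv(K), pix_h) * d[:, None]
    pts_h = F.pad(pts_cam, (0, 1), value=1.0)
    return torch.einsum("ij,pj->pi", c2w, pts_h)[..., :3]  # (P, 3) world points

pts = unproject_tracks(torch.rand(16, 2) * 100,
                       torch.rand(120, 160) + 1.0,
                       torch.tensor([[100.0, 0, 80], [0, 100.0, 60], [0, 0, 1.0]]),
                       torch.eye(4))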
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
- self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/utils.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. 
-
-    Args:
-        occlusions: [-inf, inf], np.float32
-        expected_dist: [-inf, inf], np.float32
-
-    Returns:
-        visibles: bool
-    """
-
-    def sigmoid(x):
-        if isinstance(x, np.ndarray):
-            return 1 / (1 + np.exp(-x))
-        else:
-            return torch.sigmoid(x)
-
-    visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5
-    return visibles
-
-
-def parse_tapir_track_info(occlusions, expected_dist):
-    """
-    return:
-        valid_visible: mask of visible & confident points
-        valid_invisible: mask of invisible & confident points
-        confidence: clamped confidence scores (all < 0.5 -> 0)
-    """
-    visibility = 1 - F.sigmoid(occlusions)
-    confidence = 1 - F.sigmoid(expected_dist)
-    valid_visible = visibility * confidence > 0.5
-    valid_invisible = (1 - visibility) * confidence > 0.5
-    # set all confidence < 0.5 to 0
-    confidence = confidence * (valid_visible | valid_invisible).float()
-    return valid_visible, valid_invisible, confidence
-
-
-def get_tracks_3d_for_query_frame(
-    query_index: int,
-    query_img: torch.Tensor,
-    tracks_2d: torch.Tensor,
-    depths: torch.Tensor,
-    masks: torch.Tensor,
-    inv_Ks: torch.Tensor,
-    c2ws: torch.Tensor,
-):
-    """
-    :param query_index (int)
-    :param query_img [H, W, 3]
-    :param tracks_2d [N, T, 4]
-    :param depths [T, H, W]
-    :param masks [T, H, W]
-    :param inv_Ks [T, 3, 3]
-    :param c2ws [T, 4, 4]
-    returns (
-        tracks_3d [N, T, 3]
-        track_colors [N, 3]
-        visibles [N, T]
-        invisibles [N, T]
-        confidences [N, T]
-    )
-    """
-    T, H, W = depths.shape
-    query_img = query_img[None].permute(0, 3, 1, 2)  # (1, 3, H, W)
-    tracks_2d = tracks_2d.swapaxes(0, 1)  # (T, N, 4)
-    tracks_2d, occs, dists = (
-        tracks_2d[..., :2],
-        tracks_2d[..., 2],
-        tracks_2d[..., 3],
-    )
-    # visibles = postprocess_occlusions(occs, dists)
-    # (T, N), (T, N), (T, N)
-    visibles, invisibles, confidences = parse_tapir_track_info(occs, dists)
-    # Unproject 2D tracks to 3D.
-    # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N)
-    track_depths = F.grid_sample(
-        depths[:, None],
-        normalize_coords(tracks_2d[:, None], H, W),
-        align_corners=True,
-        padding_mode="border",
-    )[:, 0, 0]
-    tracks_3d = (
-        torch.einsum(
-            "nij,npj->npi",
-            inv_Ks,
-            F.pad(tracks_2d, (0, 1), value=1.0),
-        )
-        * track_depths[..., None]
-    )
-    tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[
-        ..., :3
-    ]
-    # Filter out out-of-mask tracks.
-    # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N)
-    is_in_masks = (
-        F.grid_sample(
-            masks[:, None],
-            normalize_coords(tracks_2d[:, None], H, W),
-            align_corners=True,
-        )[:, 0, 0]
-        == 1
-    )
-    visibles *= is_in_masks
-    invisibles *= is_in_masks
-    confidences *= is_in_masks.float()
-
-    # valid if in the fg mask at least 40% of the time
-    # in_mask_counts = is_in_masks.sum(0)
-    # t = 0.25
-    # thresh = min(t * T, in_mask_counts.float().quantile(t).item())
-    # valid = in_mask_counts > thresh
-    valid = is_in_masks[query_index]
-    # valid if visible 5% of the time
-    visible_counts = visibles.sum(0)
-    valid = valid & (
-        visible_counts
-        >= min(
-            int(0.05 * T),
-            visible_counts.float().quantile(0.1).item(),
-        )
-    )
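parse_tapir_track_info above splits TAPIR's two logit channels into three supervision signals: a point is "valid visible" when visibility times confidence exceeds 0.5, "valid invisible" when the complement does, and anything below 0.5 confidence is zeroed. A small sanity check of those rules on hand-picked logits:

import torch

occlusions = torch.tensor([-4.0, -4.0, 4.0, 0.0])     # logit of being occluded
expected_dist = torch.tensor([-4.0, 4.0, -4.0, 0.0])  # logit of localization error

visibility = 1 - torch.sigmoid(occlusions)            # high when not occluded
confidence = 1 - torch.sigmoid(expected_dist)         # high when well localized
valid_visible = visibility * confidence > 0.5         # [True, False, False, False]
valid_invisible = (1 - visibility) * confidence > 0.5 # [False, False, True, False]
confidence = confidence * (valid_visible | valid_invisible).float()

-    # Get track's color from the query frame.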
- # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. 
- If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. -def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] 
- ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/init_utils.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes -from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - -def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor -) -> GaussianParams: - """ - using dataclasses individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. 
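The scale initialization just above sets each Gaussian's extent from the mean distance to its three nearest neighbors, clamped to the 5th-95th percentile band and stored in log space (the activation later is exp). A standalone sketch of that recipe using sklearn, mirroring the knn helper in loss_utils (the random points are placeholders):

import numpy as np
import torch
from sklearn.neighbors import NearestNeighbors

pts = torch.randn(1000, 3)  # stand-in for canonical-frame track positions

nn = NearestNeighbors(n_neighbors=4).fit(pts.numpy())       # 4 = self + 3 neighbors
dists, _ = nn.kneighbors(pts.numpy())
dists = torch.from_numpy(dists[:, 1:].astype(np.float32))   # drop self-distance

scales = dists.mean(dim=-1, keepdim=True)
scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95))
log_scales = torch.log(scales.repeat(1, 3))  # isotropic init, exp-activated later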
- opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - -def init_bg( - points: StaticObservations, -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkdg_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkdg_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals. - local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, 
init_ts = [], []
-
-    if rot_type == "quat":
-        id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device)
-        rot_dim = 4
-    else:
-        id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device)
-        rot_dim = 6
-
-    init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1)
-    init_ts = torch.zeros(num_bases, num_frames, 3, device=device)
-    errs_before = np.full((num_bases, num_frames), -1.0)
-    errs_after = np.full((num_bases, num_frames), -1.0)
-
-    tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames))
-    print(f"{tgt_ts=}")
-    skipped_ts = {}
-    for n, cluster_id in enumerate(ids):
-        mask_in_cluster = labels == cluster_id
-        cluster = tracks_3d.xyz[mask_in_cluster].transpose(
-            0, 1
-        )  # [num_frames, n_pts, 3]
-        visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes(
-            0, 1
-        )  # [num_frames, n_pts]
-        confidences = tracks_3d.confidences[mask_in_cluster].swapaxes(
-            0, 1
-        )  # [num_frames, n_pts]
-        weights = get_weights_for_procrustes(cluster, visibilities)
-        prev_t = cano_t
-        cluster_skip_ts = []
-        for cur_t in tgt_ts:
-            # compute pairwise transform from cano_t
-            procrustes_weights = (
-                weights[cano_t]
-                * weights[cur_t]
-                * (confidences[cano_t] + confidences[cur_t])
-                / 2
-            )
-            if procrustes_weights.sum() < min_mean_weight * num_frames:
-                init_rots[n, cur_t] = init_rots[n, prev_t]
-                init_ts[n, cur_t] = init_ts[n, prev_t]
-                cluster_skip_ts.append(cur_t)
-            else:
-                se3, (err, err_before) = solve_procrustes(
-                    cluster[cano_t],
-                    cluster[cur_t],
-                    weights=procrustes_weights,
-                    enforce_se3=True,
-                    rot_type=rot_type,
-                )
-                init_rot, init_t, _ = se3
-                assert init_rot.shape[-1] == rot_dim
-                # double cover
-                if rot_type == "quat" and torch.linalg.norm(
-                    init_rot - init_rots[n][prev_t]
-                ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]):
-                    init_rot = -init_rot
-                init_rots[n, cur_t] = init_rot
-                init_ts[n, cur_t] = init_t
-                if np.isnan(err):
-                    print(f"{cur_t=} {err=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                if np.isnan(err_before):
-                    print(f"{cur_t=} {err_before=}")
-                    print(f"{procrustes_weights.isnan().sum()=}")
-                errs_after[n, cur_t] = err
-                errs_before[n, cur_t] = err_before
-                prev_t = cur_t
-        skipped_ts[cluster_id.item()] = cluster_skip_ts
-
-    guru.info(f"{skipped_ts=}")
-    guru.info(
-        "procrustes init median error: {:.5f} => {:.5f}".format(
-            np.median(errs_before[errs_before > 0]),
-            np.median(errs_after[errs_after > 0]),
-        )
-    )
-    guru.info(
-        "procrustes init mean error: {:.5f} => {:.5f}".format(
-            np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0])
-        )
-    )
-    guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}")
-
-    if vis:
-        server = get_server(port)
-        center_idcs = torch.argmin(dists2centers, dim=0)
-        print(f"{dists2centers.shape=} {center_idcs.shape=}")
-        vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs])
-        vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks")
-        import ipdb
-
-        ipdb.set_trace()
-
-    bases = MotionBases(init_rots, init_ts)
-    return bases, motion_coefs, tracks_3d
-
-
-def run_initial_optim(
-    fg: GaussianParams,
-    bases: MotionBases,
-    tracks_3d: TrackObservations,
-    Ks: torch.Tensor,
-    w2cs: torch.Tensor,
-    num_iters: int = 1000,
-    use_depth_range_loss: bool = False,
-):
-    """
-    :param motion_rots: [num_bases, num_frames, 4|6]
-    :param motion_transls: [num_bases, num_frames, 3]
-    :param motion_coefs: [num_bases, num_frames]
-    :param means: [num_gaussians, 3]
-    """
-    optimizer = torch.optim.Adam(
-        [
-            {"params":
bases.params["rots"], "lr": 1e-2}, - {"params": bases.params["transls"], "lr": 3e-2}, - {"params": fg.params["motion_coefs"], "lr": 1e-2}, - {"params": fg.params["means"], "lr": 1e-3}, - ], - ) - scheduler = torch.optim.lr_scheduler.ExponentialLR( - optimizer, gamma=0.1 ** (1 / num_iters) - ) - G = fg.params.means.shape[0] - num_frames = bases.num_frames - device = bases.params["rots"].device - - w_smooth_func = lambda i, min_v, max_v, th: ( - min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v - ) - - gt_2d, gt_depth = project_2d_tracks( - tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - # (G, T, 2) - gt_2d = gt_2d.swapaxes(0, 1) - # (G, T) - gt_depth = gt_depth.swapaxes(0, 1) - - ts = torch.arange(0, num_frames, device=device) - ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1)) # i (3B,) - - pbar = tqdm(range(0, num_iters)) - for i in pbar: - coefs = fg.get_coefs() - transfms = bases.compute_transforms(ts, coefs) - positions = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. 
- w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: 
ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / (cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, 
first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/loss_utils.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss 
= loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - # acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/metrics.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/metrics.py +++ 
/dev/null
@@ -1,313 +0,0 @@
-from typing import Literal
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from torchmetrics.functional.image.lpips import _NoTrainLpips
-from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure
-from torchmetrics.metric import Metric
-from torchmetrics.utilities import dim_zero_cat
-from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE
-
-
-def compute_psnr(
-    preds: torch.Tensor,
-    targets: torch.Tensor,
-    masks: torch.Tensor | None = None,
-) -> float:
-    """
-    Args:
-        preds (torch.Tensor): (..., 3) predicted images in [0, 1].
-        targets (torch.Tensor): (..., 3) target images in [0, 1].
-        masks (torch.Tensor | None): (...,) optional binary masks where the
-            1-regions will be taken into account.
-
-    Returns:
-        psnr (float): Peak signal-to-noise ratio.
-    """
-    if masks is None:
-        masks = torch.ones_like(preds[..., 0])
-    return (
-        -10.0
-        * torch.log(
-            F.mse_loss(
-                preds * masks[..., None],
-                targets * masks[..., None],
-                reduction="sum",
-            )
-            / masks.sum().clamp(min=1.0)
-            / 3.0
-        )
-        / np.log(10.0)
-    ).item()
-
-
-def compute_pose_errors(
-    preds: torch.Tensor, targets: torch.Tensor
-) -> tuple[float, float, float]:
-    """
-    Args:
-        preds: (N, 4, 4) predicted camera poses.
-        targets: (N, 4, 4) target camera poses.
-
-    Returns:
-        ate (float): Absolute trajectory error.
-        rpe_t (float): Relative pose error in translation.
-        rpe_r (float): Relative pose error in rotation (degree).
-    """
-    # Compute ATE.
-    ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item()
-    # Compute RPE_t and RPE_r.
-    # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r.
-    # torch has numerical issues for acos when the value is close to 1.0, i.e.
-    # RPE_r is supposed to be very small, and will result in artificially large
-    # error.
-    preds = preds.detach().cpu().numpy()
-    targets = targets.detach().cpu().numpy()
-    pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:]
-    target_rels = np.linalg.inv(targets[:-1]) @ targets[1:]
-    error_rels = np.linalg.inv(target_rels) @ pred_rels
-    traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1)
-    rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item()
-    rpe_r = (
-        np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item()
-        / np.pi
-        * 180.0
-    )
-    return ate, rpe_t, rpe_r
-
-
-class mPSNR(PeakSignalNoiseRatio):
-    sum_squared_error: list[torch.Tensor]
-    total: list[torch.Tensor]
-
-    def __init__(self, **kwargs) -> None:
-        super().__init__(
-            data_range=1.0,
-            base=10.0,
-            dim=None,
-            reduction="elementwise_mean",
-            **kwargs,
-        )
-        self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat")
-        self.add_state("total", default=[], dist_reduce_fx="cat")
-
-    def __len__(self) -> int:
-        return len(self.total)
-
-    def update(
-        self,
-        preds: torch.Tensor,
-        targets: torch.Tensor,
-        masks: torch.Tensor | None = None,
-    ):
-        """Update state with predictions and targets.
-
-        Args:
-            preds (torch.Tensor): (..., 3) float32 predicted images.
-            targets (torch.Tensor): (..., 3) float32 target images.
-            masks (torch.Tensor | None): (...,) optional binary masks where the
-                1-regions will be taken into account.
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - self.sum_squared_error.append( - torch.sum(torch.pow((preds - targets) * masks[..., None], 2)) - ) - self.total.append(masks.sum().to(torch.int64) * 3) - - def compute(self) -> torch.Tensor: - """Compute peak signal-to-noise ratio over state.""" - sum_squared_error = dim_zero_cat(self.sum_squared_error) - total = dim_zero_cat(self.total) - return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0) - - -class mSSIM(StructuralSimilarityIndexMeasure): - similarity: list - - def __init__(self, **kwargs) -> None: - super().__init__( - reduction=None, - data_range=1.0, - return_full_image=False, - **kwargs, - ) - assert isinstance(self.sigma, float) - - def __len__(self) -> int: - return sum([s.shape[0] for s in self.similarity]) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional binary masks where - the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - - # Construct a 1D Gaussian blur filter. - assert isinstance(self.kernel_size, int) - hw = self.kernel_size // 2 - shift = (2 * hw - self.kernel_size + 1) / 2 - assert isinstance(self.sigma, float) - f_i = ( - (torch.arange(self.kernel_size, device=preds.device) - hw + shift) - / self.sigma - ) ** 2 - filt = torch.exp(-0.5 * f_i) - filt /= torch.sum(filt) - - # Blur in x and y (faster than the 2D convolution). - def convolve2d(z, m, f): - # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf). - z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. 
- # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. 
- """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/params.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new 
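- # The concatenation order (kept, duplicated, split x2) matters: the
- # optimizer-state surgery in trainer.dup_in_optim appends exactly
- # num_dups + 2 * num_splits zero rows after the kept rows to match it.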
- self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] == 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/renderer.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. 
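- # work_dir is where rendered outputs are written; a non-None port spins up
- # an interactive DynamicViewer in "rendering" mode (see below).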
- work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/scene_model.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def __init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return 
self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns opacities: (G,) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,).
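- # When target_ts (and optionally target_w2cs) are given, each gaussian's
- # 3D position at those B timesteps is rasterized into extra channels and
- # returned as out_dict["tracks_3d"] of shape (C, H, W, B, 3).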
- target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. 
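- # (means2d is a non-leaf tensor, so autograd frees its .grad after
- # backward() by default; retain_grad() keeps it so _prepare_control_step
- # can read screen-space gradient norms for densification.)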
- self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/tensor_dataclass.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
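- # (The comprehension below rebuilds a plain dict, so state on a dict
- # subclass would be silently dropped; the assert makes that loud.)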
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/trainer.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
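- # work_dir also serves as the TensorBoard log dir (SummaryWriter below);
- # the *_every arguments control how often to log, checkpoint, and validate.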
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
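- # The viewer FoV is treated as vertical: f = 0.5 * H / tan(0.5 * fov),
- # with the principal point at the image center.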
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
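- # All B lists of N target timesteps are flattened into one (B * N,) vector
- # so poses are computed in a single pass, then split back per sample below.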
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
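- # The rendered tracks are dense over all H * W pixels, but ground truth
- # exists only at the P query pixels; the mask built below selects those
- # pixels in each of the N target frames.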
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
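- # (Each loss is stored via .item(), so the stats dict holds plain floats
- # and keeps no references to the autograd graph.)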
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
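- # Screen-space gradients are accumulated per gaussian over all batched
- # views; a gaussian counts as visible in a view when its projected
- # radius > 0.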
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/trajectories.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1) - y = torch.cross(z, x, dim=-1) # (3) - avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center) - avg_w2c = torch.linalg.inv(avg_c2w) - return avg_w2c - - -def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor: - """Triangulate a set of rays to find a single lookat point. - - Args: - origins (torch.Tensor): A (N, 3) array of ray origins. - viewdirs (torch.Tensor): A (N, 3) array of ray view directions. - - Returns: - torch.Tensor: A (3,) lookat point. - """ - - viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1) - eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None] - # Calculate projection matrix I - rr^T - I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :]) - # Compute sum of projections - sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3) - # Solve for the intersection point using least squares - lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0] - # Check NaNs. - assert not torch.any(torch.isnan(lookat)) - return lookat - - -def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor): - """ - Args: - positions: (N, 3) tensor of camera positions - lookat: (3,) tensor of lookat point - up: (3,) tensor of up vector - - Returns: - w2cs: (N, 4, 4) tensor of world to camera matrices - """ - forward_vectors = F.normalize(lookat - positions, dim=-1) - right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1) - down_vectors = F.normalize( - torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1 - ) - Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1) - w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions)) - return w2cs - - -def get_arc_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_position = torch.linalg.inv(ref_w2c)[:3, 3] - thetas = ( - torch.sin( - torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[ - :-1 - ] - ) - * (degree / 2.0) - / 180.0 - * torch.pi - ) - positions = torch.einsum( - "nij,j->ni", - roma.rotvec_to_rotmat(thetas[:, None] * up[None]), - ref_position - lookat, - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_lemniscate_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi) - # Lemniscate curve in camera space. Starting at the origin. - thetas = ( - torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1] - + torch.pi / 2 - ) - positions = torch.stack( - [ - a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2), - a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2), - torch.zeros(num_frames, device=ref_w2c.device), - ], - dim=-1, - ) - # Transform to world space.
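- # (F.pad(..., value=1.0) appends a homogeneous 1 so ref_c2w[:3] applies
- # rotation and translation in a single einsum.)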
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/transforms.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
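- When s is given, the bottom row becomes (0, 0, 0, 1/s), so
- dehomogenizing a transformed point yields s * (R @ x + t), i.e. a
- similarity transform.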
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transfm to be SE3. - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - q (torch.Tensor): (4,), rotation component in quaternion of WXYZ - format. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (torch.Tensor): (), average L2 distance after alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error. 
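- # (Re-applies the recovered transform via rt_to_mat4(R, t, s) and measures
- # the weighted L2 error before and after alignment; both values are
- # returned alongside sim3 for sanity checking.)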
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/validator.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
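- # (valid_masks falls back to all-ones when the dataset provides none.)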
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
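# --- Illustrative aside (not part of the original file) ---
# Two pieces of the keypoint readout, in isolation. First, sampling a dense
# (H, W, C) prediction map at sub-pixel keypoints with F.grid_sample, which
# expects coordinates in [-1, 1]; normalize_coords is assumed to follow the
# align_corners=True convention sketched here. Second, the PCK metric updated
# below, which under a common definition counts predictions within a pixel
# threshold (5% of the larger image dimension in this codebase).
import torch
import torch.nn.functional as F

def sample_at_points(feat_hwc: torch.Tensor, pts_xy: torch.Tensor) -> torch.Tensor:
    """feat_hwc: (H, W, C); pts_xy: (N, 2) pixel coords. Returns (N, C)."""
    H, W, _ = feat_hwc.shape
    grid = pts_xy / pts_xy.new_tensor([W - 1, H - 1]) * 2.0 - 1.0  # pixels -> [-1, 1]
    out = F.grid_sample(
        feat_hwc.permute(2, 0, 1)[None],  # (1, C, H, W)
        grid[None, None],  # (1, 1, N, 2)
        align_corners=True,
    )
    return out[0, :, 0].T  # (N, C)

def pck(pred: torch.Tensor, gt: torch.Tensor, threshold: float) -> float:
    """pred, gt: (N, 2); fraction of keypoints within `threshold` pixels."""
    return (torch.linalg.norm(pred - gt, dim=-1) < threshold).float().mean().item()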
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
- t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (3, 3). - img = batch["imgs"][0] - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "motion_coefs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__init__.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc deleted file mode 100644 index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__pycache__/render_panel.cpython-310.pyc deleted file mode 100644 index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__pycache__/viewer.cpython-310.pyc deleted file mode 100644 index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/playback_panel.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/playback_panel.py deleted file mode 100644 index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/playback_panel.py +++ /dev/null @@ -1,68 +0,0 @@ -import threading -import time - 
-import viser - - -def add_gui_playback_group( - server: viser.ViserServer, - num_frames: int, - min_fps: float = 1.0, - max_fps: float = 60.0, - fps_step: float = 0.1, - initial_fps: float = 10.0, -): - gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/render_panel.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
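# --- Illustrative aside (not part of the original render_panel.py) ---
# A minimal sketch of wiring the playback group defined above into a viser
# app; the on_update body is an assumption for illustration.
import viser
from flow3d.vis.playback_panel import add_gui_playback_group

def _playback_demo() -> None:
    server = viser.ViserServer()
    gui_timestep, *_ = add_gui_playback_group(server, num_frames=80, initial_fps=15.0)

    @gui_timestep.on_update
    def _(_) -> None:
        # Hypothetical hook: re-render the scene at the selected timestep.
        print("timestep ->", gui_timestep.value)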
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
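# --- Illustrative aside (not part of the original file) ---
# CameraPath above interpolates keyframes with Kochanek-Bartels splines from
# the `splines` package, exposing only the tension term (tcb=(tension, 0, 0)).
# The position spline in isolation, with made-up keyframe positions:
import numpy as np
import splines

positions = [
    np.array([0.0, 0.0, 0.0]),
    np.array([1.0, 0.0, 0.0]),
    np.array([1.0, 1.0, 0.0]),
]
pos_spline = splines.KochanekBartels(
    positions, tcb=(0.5, 0.0, 0.0), endconditions="natural"
)
point = pos_spline.evaluate(0.5)  # halfway along the first segment, shape (3,)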
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
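# --- Illustrative aside (not part of the original file) ---
# The fly-to loop above eases the camera along the geodesic between two SE(3)
# poses: log() turns the relative pose into a twist, and exp() of the scaled
# twist gives intermediate poses. The same computation in isolation, with
# made-up poses:
import numpy as np
import viser.transforms as tf

T_a = tf.SE3.identity()
T_b = tf.SE3.from_rotation_and_translation(
    tf.SO3.from_x_radians(np.pi / 4), np.array([1.0, 0.0, 0.0])
)
T_ab = T_a.inverse() @ T_b
halfway = T_a @ tf.SE3.exp(T_ab.log() * 0.5)  # pose 50% of the way to T_b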
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
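# --- Illustrative aside (not part of the original file) ---
# spline_t_from_t_sec above maps seconds to spline parameters with a PCHIP
# interpolant, which preserves monotonicity (a generic cubic could overshoot
# and briefly run the camera backwards). The mapping in isolation, with
# made-up cumulative transition times:
import numpy as np
import scipy.interpolate

times_cumsum = np.array([0.0, 2.0, 2.5, 6.0])  # seconds at each keyframe
interp = scipy.interpolate.PchipInterpolator(
    x=times_cumsum, y=np.arange(len(times_cumsum))
)
assert np.isclose(interp(2.0), 1.0)  # keyframe times map to integer indices
assert np.all(np.diff(interp(np.linspace(0.0, 6.0, 100))) >= 0)  # monotone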
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
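# --- Illustrative aside (not part of the original file) ---
# compute_duration and compute_transition_times_cumsum above charge one
# transition per keyframe, skipping the first unless the path loops, so N
# keyframes yield N - 1 transitions (N with loop=True). With the default
# transition of 2.0 s and no per-keyframe overrides:
#
#   3 keyframes, loop=False -> 2 transitions -> duration = 4.0 s
#   3 keyframes, loop=True  -> 3 transitions -> duration = 6.0 s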
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
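# --- Illustrative aside (not part of the original file) ---
# _make_transform_controls_callback above is a separate helper because Python
# closures capture variables, not values: registering the handler directly in
# the loop below would leave every handler pointing at the last keyframe. The
# pitfall and the fix in miniature:
def _late_binding_demo() -> None:
    handlers = [lambda: i for i in range(3)]
    assert [h() for h in handlers] == [2, 2, 2]  # all share the final i

    def make_handler(j: int):
        return lambda: j  # j is bound per call

    handlers = [make_handler(i) for i in range(3)]
    assert [h() for h in handlers] == [0, 1, 2]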
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
- Returns current camera pose + FOV if available.""" - - if preview_frame_slider is None: - return - maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad( - preview_frame_slider.value / get_max_frame_index() - ) - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - render_tab_state.preview_fov = fov_rad - render_tab_state.preview_aspect = camera_path.get_aspect() - render_tab_state.preview_camera_type = camera_type.value - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - return pose, fov_rad, time - - def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]: - """Helper for creating the current frame # slider. This is removed and - re-added anytime the `max` value changes.""" - - with playback_folder: - preview_frame_slider = server.gui.add_slider( - "Preview frame", - min=0, - max=get_max_frame_index(), - step=1, - initial_value=0, - # Place right after the pause button. - order=preview_render_stop_button.order + 0.01, - disabled=get_max_frame_index() == 1, - ) - play_button.disabled = preview_frame_slider.disabled - preview_render_button.disabled = preview_frame_slider.disabled - - @preview_frame_slider.on_update - def _(_) -> None: - nonlocal preview_camera_handle - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - - preview_camera_handle = server.scene.add_camera_frustum( - "/preview_camera", - fov=fov_rad, - aspect=resolution.value[0] / resolution.value[1], - scale=0.35, - wxyz=pose.rotation().wxyz, - position=pose.translation(), - color=(10, 200, 30), - ) - if render_tab_state.preview_render: - for client in server.get_clients().values(): - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - return preview_frame_slider - - # We back up the camera poses before previewing renders and restore them after. - camera_pose_backup_from_id: Dict[int, tuple] = {} - - @preview_render_button.on_click - def _(_) -> None: - render_tab_state.preview_render = True - preview_render_button.visible = False - preview_render_stop_button.visible = True - - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov, time = maybe_pose_and_fov_rad_and_time - del fov - - # Hide all scene nodes when we're previewing the render. - server.scene.set_global_visibility(False) - - # Back up and then set camera poses. - for client in server.get_clients().values(): - camera_pose_backup_from_id[client.client_id] = ( - client.camera.position, - client.camera.look_at, - client.camera.up_direction, - ) - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - @preview_render_stop_button.on_click - def _(_) -> None: - render_tab_state.preview_render = False - preview_render_button.visible = True - preview_render_stop_button.visible = False - - # Revert camera poses.
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera() # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play).start() - - # Play the camera trajectory when the play button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # add button for loading existing path - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - camera_path_dir = datapath.parent - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # load the json file - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # apply the x rotation by 180 deg - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_Fov values will not be exactly matched. 
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/utils.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, 
t], colors, occ, radius=radius) - out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/viewer.py b/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/horsejump-high/code/2024-10-26-014752/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/horsejump-high/code/2024-10-26-014752/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-014752/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/horsejump-high/code/2024-10-26-014752/scripts/evaluate_iphone.py b/som_out/horsejump-high/code/2024-10-26-014752/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- 
a/som_out/horsejump-high/code/2024-10-26-014752/scripts/evaluate_iphone.py +++ /dev/null @@ -1,447 +0,0 @@
-import argparse
-import json
-import os.path as osp
-from glob import glob
-from itertools import product
-
-import cv2
-import imageio.v3 as iio
-import numpy as np
-import roma
-import torch
-from tqdm import tqdm
-
-from flow3d.data.colmap import get_colmap_camera_params
-from flow3d.metrics import mLPIPS, mPSNR, mSSIM
-from flow3d.transforms import rt_to_mat4, solve_procrustes
-
-parser = argparse.ArgumentParser()
-parser.add_argument(
-    "--data_dir",
-    type=str,
-    help="Path to the data directory that contains all the sequences.",
-)
-parser.add_argument(
-    "--result_dir",
-    type=str,
-    help="Path to the result directory that contains the results. "
-    "For batch evaluation, result_dir should contain one subdirectory per "
-    "sequence (result_dir/seq_name/results); for single-sequence evaluation, "
-    "result_dir should contain the results directly (result_dir/results).",
-)
-parser.add_argument(
-    "--seq_names",
-    type=str,
-    nargs="+",
-    default=[
-        "apple",
-        "backpack",
-        "block",
-        "creeper",
-        "handwavy",
-        "haru-sit",
-        "mochi-high-five",
-        "paper-windmill",
-        "pillow",
-        "spin",
-        "sriracha-tree",
-        "teddy",
-    ],
-    help="Sequence names to evaluate.",
-)
-args = parser.parse_args()
-
-
-def load_data_dict(data_dir, train_names, val_names):
-    val_imgs = np.array(
-        [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names]
-    )
-    val_covisibles = np.array(
-        [
-            iio.imread(
-                osp.join(
-                    data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png"
-                )
-            )
-            for name in tqdm(val_names, desc="Loading val covisibles")
-        ]
-    )
-    train_depths = np.array(
-        [
-            np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0]
-            for name in train_names
-        ]
-    )
-    train_Ks, train_w2cs = get_colmap_camera_params(
-        osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"),
-        [name + ".png" for name in train_names],
-    )
-    train_Ks = train_Ks[:, :3, :3]
-    scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item()
-    train_c2ws = np.linalg.inv(train_w2cs)
-    train_c2ws[:, :3, -1] *= scale
-    train_w2cs = np.linalg.inv(train_c2ws)
-    keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json")))
-    keypoints_2d = []
-    for keypoint_path in keypoint_paths:
-        with open(keypoint_path) as f:
-            keypoints_2d.append(json.load(f))
-    keypoints_2d = np.array(keypoints_2d)
-    keypoints_2d[..., :2] *= 2.0
-    time_ids = np.array(
-        [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths]
-    )
-    time_pairs = np.array(list(product(time_ids, repeat=2)))
-    index_pairs = np.array(list(product(range(len(time_ids)), repeat=2)))
-    keypoints_3d = []
-    for i, kps_2d in zip(time_ids, keypoints_2d):
-        K = train_Ks[i]
-        w2c = train_w2cs[i]
-        depth = train_depths[i]
-        is_kp_visible = kps_2d[:, 2] == 1
-        is_depth_valid = (
-            cv2.remap(
-                (depth != 0).astype(np.float32),
-                kps_2d[None, :, :2].astype(np.float32),
-                None,  # type: ignore
-                cv2.INTER_LINEAR,
-                borderMode=cv2.BORDER_CONSTANT,
-            )[0]
-            == 1
-        )
-        kp_depths = cv2.remap(
-            depth,  # type: ignore
-            kps_2d[None, :, :2].astype(np.float32),
-            None,  # type: ignore
-            cv2.INTER_LINEAR,
-            borderMode=cv2.BORDER_CONSTANT,
-        )[0]
-        kps_3d = (
-            np.einsum(
-                "ij,pj->pi",
-                np.linalg.inv(K),
-                np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1),
-            )
-            * kp_depths[:, None]
-        )
-        kps_3d = np.einsum(
-            "ij,pj->pi",
-            np.linalg.inv(w2c)[:3],
-            np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1),
-        )
-        kps_3d = np.concatenate(
-            [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1
-        )
-        kps_3d[kps_3d[:, -1] != 1] = 0.0
-        keypoints_3d.append(kps_3d)
-    keypoints_3d = np.array(keypoints_3d)
-    return {
-        "val_imgs": val_imgs,
-        "val_covisibles": val_covisibles,
-        "train_depths": train_depths,
-        "train_Ks": train_Ks,
-        "train_w2cs": train_w2cs,
-        "keypoints_2d": keypoints_2d,
-        "keypoints_3d": keypoints_3d,
-        "time_ids": time_ids,
-        "time_pairs": time_pairs,
-        "index_pairs": index_pairs,
-    }
-
-
-def load_result_dict(result_dir, val_names):
-    try:
-        pred_val_imgs = np.array(
-            [
-                iio.imread(osp.join(result_dir, "rgb", f"{name}.png"))
-                for name in val_names
-            ]
-        )
-    except:
-        pred_val_imgs = None
-    try:
-        keypoints_dict = np.load(
-            osp.join(result_dir, "keypoints.npz"), allow_pickle=True
-        )
-        if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict:
-            keypoints_dict = keypoints_dict["arr_0"].item()
-        pred_keypoint_Ks = keypoints_dict["Ks"]
-        pred_keypoint_w2cs = keypoints_dict["w2cs"]
-        pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"]
-        pred_train_depths = keypoints_dict["pred_train_depths"]
-    except:
-        print(
-            "keypoints.npz not found; this should only happen if the method "
-            "itself cannot produce keypoints."
-        )
-        keypoints_dict = {}
-        pred_keypoint_Ks = None
-        pred_keypoint_w2cs = None
-        pred_keypoints_3d = None
-        pred_train_depths = None
-
-    if "visibilities" in list(keypoints_dict.keys()):
-        pred_visibilities = keypoints_dict["visibilities"]
-    else:
-        pred_visibilities = None
-
-    return {
-        "pred_val_imgs": pred_val_imgs,
-        "pred_train_depths": pred_train_depths,
-        "pred_keypoint_Ks": pred_keypoint_Ks,
-        "pred_keypoint_w2cs": pred_keypoint_w2cs,
-        "pred_keypoints_3d": pred_keypoints_3d,
-        "pred_visibilities": pred_visibilities,
-    }
-
-
-def evaluate_3d_tracking(data_dict, result_dict):
-    train_Ks = data_dict["train_Ks"]
-    train_w2cs = data_dict["train_w2cs"]
-    keypoints_3d = data_dict["keypoints_3d"]
-    time_ids = data_dict["time_ids"]
-    time_pairs = data_dict["time_pairs"]
-    index_pairs = data_dict["index_pairs"]
-    pred_keypoint_Ks = result_dict["pred_keypoint_Ks"]
-    pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"]
-    pred_keypoints_3d = result_dict["pred_keypoints_3d"]
-    if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks):
-        print("Inconsistent camera intrinsics.")
-        print(train_Ks[time_ids][0], pred_keypoint_Ks[0])
-    keypoint_w2cs = train_w2cs[time_ids]
-    q, t, s = solve_procrustes(
-        torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to(
-            torch.float32
-        ),
-        torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32),
-    )[0]
-    R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1))
-    pred_keypoints_3d = np.einsum(
-        "ij,...j->...i",
-        rt_to_mat4(R, t, s).numpy().astype(np.float64),
-        np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1),
-    )
-    pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:]
-    # Compute 3D tracking metrics.
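# For each ordered (query, target) time pair, only keypoints covisible in
# both frames are scored. With d_i the per-keypoint Euclidean error after
# the Procrustes alignment above, the code below reports
#   EPE      = mean over pairs of mean_i d_i,
#   PCK@10cm = mean over pairs of the fraction of d_i below 0.10,
#   PCK@5cm  = mean over pairs of the fraction of d_i below 0.05
# (the centimeter names assume the aligned scene units are meters).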
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
None].repeat(num_pts, axis=-1) - evaluation_points = one_hot_eye == 0 - for i in range(num_frames): - evaluation_points[i, :, ~visibilities[i]] = False - occ_acc = np.sum( - np.equal(pred_visibilities, target_visibilities) & evaluation_points - ) / np.sum(evaluation_points) - all_frac_within = [] - all_jaccard = [] - - for thresh in [4, 8, 16, 32, 64]: - within_dist = np.sum( - np.square(pred_points - target_points), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, target_visibilities) - count_correct = np.sum(is_correct & evaluation_points) - count_visible_points = np.sum(target_visibilities & evaluation_points) - frac_correct = count_correct / count_visible_points - all_frac_within.append(frac_correct) - - true_positives = np.sum(is_correct & pred_visibilities & evaluation_points) - gt_positives = np.sum(target_visibilities & evaluation_points) - false_positives = (~target_visibilities) & pred_visibilities - false_positives = false_positives | ((~within_dist) & pred_visibilities) - false_positives = np.sum(false_positives & evaluation_points) - jaccard = true_positives / (gt_positives + false_positives) - all_jaccard.append(jaccard) - AJ = np.mean(all_jaccard) - APCK = np.mean(all_frac_within) - - print(f"2D tracking AJ: {AJ:.4f}") - print(f"2D tracking avg PCK: {APCK:.4f}") - print(f"2D tracking occlusion accuracy: {occ_acc:.4f}") - print("-----------------------------") - return AJ, APCK, occ_acc - - -def evaluate_nv(data_dict, result_dict): - device = "cuda" - psnr_metric = mPSNR().to(device) - ssim_metric = mSSIM().to(device) - lpips_metric = mLPIPS().to(device) - - val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device) - val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device) - pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device) - - for i in range(len(val_imgs)): - val_img = val_imgs[i] / 255.0 - pred_val_img = pred_val_imgs[i] / 255.0 - val_covisible = val_covisibles[i] / 255.0 - psnr_metric.update(val_img, pred_val_img, val_covisible) - ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - mpsnr = psnr_metric.compute().item() - mssim = ssim_metric.compute().item() - mlpips = lpips_metric.compute().item() - print(f"NV mPSNR: {mpsnr:.4f}") - print(f"NV mSSIM: {mssim:.4f}") - print(f"NV mLPIPS: {mlpips:.4f}") - return mpsnr, mssim, mlpips - - -if __name__ == "__main__": - seq_names = args.seq_names - - epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], [] - AJ_all, APCK_all, occ_acc_all = [], [], [] - mpsnr_all, mssim_all, mlpips_all = [], [], [] - - for seq_name in seq_names: - print("=========================================") - print(f"Evaluating {seq_name}") - print("=========================================") - data_dir = osp.join(args.data_dir, seq_name) - if not osp.exists(data_dir): - data_dir = args.data_dir - if not osp.exists(data_dir): - raise ValueError(f"Data directory {data_dir} not found.") - result_dir = osp.join(args.result_dir, seq_name, "results/") - if not osp.exists(result_dir): - result_dir = osp.join(args.result_dir, "results/") - if not osp.exists(result_dir): - raise ValueError(f"Result directory {result_dir} not found.") - - with open(osp.join(data_dir, "splits/train.json")) as f: - train_names = json.load(f)["frame_names"] - with open(osp.join(data_dir, "splits/val.json")) as f: - val_names = json.load(f)["frame_names"] - - data_dict = load_data_dict(data_dir, train_names, 
val_names) - result_dict = load_result_dict(result_dir, val_names) - if result_dict["pred_keypoints_3d"] is not None: - epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict) - AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict) - epe_all.append(epe) - pck_3d_10cm_all.append(pck_3d_10cm) - pck_3d_5cm_all.append(pck_3d_5cm) - AJ_all.append(AJ) - APCK_all.append(APCK) - occ_acc_all.append(occ_acc) - if len(data_dict["val_imgs"]) > 0: - if result_dict["pred_val_imgs"] is None: - print("No NV results found.") - continue - mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict) - mpsnr_all.append(mpsnr) - mssim_all.append(mssim) - mlpips_all.append(mlpips) - - print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}") - print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}") - print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}") - print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}") - print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}") - print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}") - print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}") - print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}") - print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}") diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__init__.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/configs.cpython-310.pyc deleted file mode 100644 index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/init_utils.cpython-310.pyc deleted file mode 100644 index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/loss_utils.cpython-310.pyc deleted file mode 100644 index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/metrics.cpython-310.pyc deleted file mode 100644 index 
601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/params.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/configs.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 
1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig - motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - w_smooth_bases: float = 0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__init__.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 
64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 index 0e12be124c07bc40149b0b8bcafd1413e94f8fd0..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/colmap.cpython-310.pyc deleted file mode 100644 index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc deleted file mode 100644 index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/base_dataset.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... 
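# Shape sanity check for implementers of get_tracks_3d (a sketch; the
# numbers are made up): sampling N = 100 tracks over T = 24 frames should
# yield coordinates (100, 24, 3); visibles, invisibles, and confidences
# each (100, 24); and colors (100, 3), one RGB triplet per track.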
- - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ... - - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/casual_dataset.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/casual_dataset.py deleted file mode 100644 index e378c6d2408c8ae5d2cdaf45cf6f77dead5c7bd2..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything" #"aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything", #"aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - 
mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = depth_type - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: 
- self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return self.depths[index] / self.scale - - def load_image(self, index) -> torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - 
min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, dtype=torch.float32), - torch.arange(0, H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/colmap.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - 
colmap_image_idcs = {v.name: k for k, v in images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/iphone_dataset.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - 
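-        # A minimal construction sketch, assuming a capture directory laid
-        # out as this loader expects (the path is illustrative, not from the
-        # original repo):
-        #   cfg = iPhoneDataConfig(data_dir="datasets/iphone/paper-windmill")
-        #   train_set = iPhoneDataset(**vars(cfg))
-        #   batch = train_set[0]  # dict with "imgs", "masks", "Ks", "w2cs", ...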
self.scene_norm_dict = scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
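-                # The .npy files loaded below appear to store inverse depth
-                # (disparity): values are clamped away from zero and then
-                # inverted, so e.g. a stored value of 0.5 becomes depth 2.0,
-                # and anything below 1e-3 saturates to depth 1000.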
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
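-                # The block below recovers an up-alignment as an axis-angle
-                # rotation: with u = mean camera up (the negated y-row of the
-                # w2cs) and z = world up, rotvec = normalize(u x z) * acos(u . z),
-                # and roma.rotvec_to_rotmat(rotvec) gives R with R @ u = z.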
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
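-        # Track files follow a "{query_frame}_{target_frame}.npy" naming
-        # scheme and hold one (P, 4) array per frame pair; judging from the
-        # slicing below, the channels are (x, y, occlusion_logit,
-        # expected_dist), matching TAPIR-style tracker outputs.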
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
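-            # grid_sample expects coordinates in [-1, 1]; normalize_coords
-            # maps pixel (0, 0) to (-1, -1) and (W - 1, H - 1) to (1, 1)
-            # (the align_corners=True convention), so colors are bilinearly
-            # sampled at sub-pixel track locations.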
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
- self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/utils.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. 
-
-    Args:
-        occlusions: [-inf, inf], np.float32
-        expected_dist: [-inf, inf], np.float32
-
-    Returns:
-        visibles: bool
-    """
-
-    def sigmoid(x):
-        # accept both numpy arrays and torch tensors
-        if isinstance(x, np.ndarray):
-            return 1 / (1 + np.exp(-x))
-        else:
-            return torch.sigmoid(x)
-
-    visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5
-    return visibles
-
-
-def parse_tapir_track_info(occlusions, expected_dist):
-    """
-    return:
-        valid_visible: mask of visible & confident points
-        valid_invisible: mask of invisible & confident points
-        confidence: clamped confidence scores (all < 0.5 -> 0)
-    """
-    visibility = 1 - F.sigmoid(occlusions)
-    confidence = 1 - F.sigmoid(expected_dist)
-    valid_visible = visibility * confidence > 0.5
-    valid_invisible = (1 - visibility) * confidence > 0.5
-    # set all confidence < 0.5 to 0
-    confidence = confidence * (valid_visible | valid_invisible).float()
-    return valid_visible, valid_invisible, confidence
-
-
-def get_tracks_3d_for_query_frame(
-    query_index: int,
-    query_img: torch.Tensor,
-    tracks_2d: torch.Tensor,
-    depths: torch.Tensor,
-    masks: torch.Tensor,
-    inv_Ks: torch.Tensor,
-    c2ws: torch.Tensor,
-):
-    """
-    :param query_index (int)
-    :param query_img [H, W, 3]
-    :param tracks_2d [N, T, 4]
-    :param depths [T, H, W]
-    :param masks [T, H, W]
-    :param inv_Ks [T, 3, 3]
-    :param c2ws [T, 4, 4]
-    returns (
-        tracks_3d [N, T, 3]
-        track_colors [N, 3]
-        visibles [N, T]
-        invisibles [N, T]
-        confidences [N, T]
-    )
-    """
-    T, H, W = depths.shape
-    query_img = query_img[None].permute(0, 3, 1, 2)  # (1, 3, H, W)
-    tracks_2d = tracks_2d.swapaxes(0, 1)  # (T, N, 4)
-    tracks_2d, occs, dists = (
-        tracks_2d[..., :2],
-        tracks_2d[..., 2],
-        tracks_2d[..., 3],
-    )
-    # visibles = postprocess_occlusions(occs, dists)
-    # (T, N), (T, N), (T, N)
-    visibles, invisibles, confidences = parse_tapir_track_info(occs, dists)
-    # Unproject 2D tracks to 3D.
-    # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N)
-    track_depths = F.grid_sample(
-        depths[:, None],
-        normalize_coords(tracks_2d[:, None], H, W),
-        align_corners=True,
-        padding_mode="border",
-    )[:, 0, 0]
-    tracks_3d = (
-        torch.einsum(
-            "nij,npj->npi",
-            inv_Ks,
-            F.pad(tracks_2d, (0, 1), value=1.0),
-        )
-        * track_depths[..., None]
-    )
-    tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[
-        ..., :3
-    ]
-    # Filter out out-of-mask tracks.
-    # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N)
-    is_in_masks = (
-        F.grid_sample(
-            masks[:, None],
-            normalize_coords(tracks_2d[:, None], H, W),
-            align_corners=True,
-        )[:, 0, 0]
-        == 1
-    )
-    visibles *= is_in_masks
-    invisibles *= is_in_masks
-    confidences *= is_in_masks.float()
-
-    # valid if in the fg mask at least 40% of the time
-    # in_mask_counts = is_in_masks.sum(0)
-    # t = 0.25
-    # thresh = min(t * T, in_mask_counts.float().quantile(t).item())
-    # valid = in_mask_counts > thresh
-    valid = is_in_masks[query_index]
-    # valid if visible 5% of the time
-    visible_counts = visibles.sum(0)
-    valid = valid & (
-        visible_counts
-        >= min(
-            int(0.05 * T),
-            visible_counts.float().quantile(0.1).item(),
-        )
-    )
-
-    # Get track's color from the query frame.
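-    # For intuition on the visibility logic used above:
-    # parse_tapir_track_info computes visibility = 1 - sigmoid(occlusion)
-    # and confidence = 1 - sigmoid(expected_dist); e.g. logits of -2 give
-    # ~0.88 each, so 0.88 * 0.88 ~ 0.78 > 0.5 and the point counts as
-    # confidently visible.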
- # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. 
- If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. -def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] 
- ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/init_utils.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes -from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - -def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor -) -> GaussianParams: - """ - using dataclasses individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. 
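-    # Parameters are stored pre-activation: opacities as logits (so
-    # sigmoid(logit(0.7)) = 0.7 after activation) and scales in log-space,
-    # mirroring the colors = torch.logit(...) initialization above.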
- opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - -def init_bg( - points: StaticObservations, -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkdg_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkdg_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals. - local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, 
init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t - procrustes_weights = ( - weights[cano_t] - * weights[cur_t] - * (confidences[cano_t] + confidences[cur_t]) - / 2 - ) - if procrustes_weights.sum() < min_mean_weight * num_frames: - init_rots[n, cur_t] = init_rots[n, prev_t] - init_ts[n, cur_t] = init_ts[n, prev_t] - cluster_skip_ts.append(cur_t) - else: - se3, (err, err_before) = solve_procrustes( - cluster[cano_t], - cluster[cur_t], - weights=procrustes_weights, - enforce_se3=True, - rot_type=rot_type, - ) - init_rot, init_t, _ = se3 - assert init_rot.shape[-1] == rot_dim - # double cover - if rot_type == "quat" and torch.linalg.norm( - init_rot - init_rots[n][prev_t] - ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]): - init_rot = -init_rot - init_rots[n, cur_t] = init_rot - init_ts[n, cur_t] = init_t - if err == np.nan: - print(f"{cur_t=} {err=}") - print(f"{procrustes_weights.isnan().sum()=}") - if err_before == np.nan: - print(f"{cur_t=} {err_before=}") - print(f"{procrustes_weights.isnan().sum()=}") - errs_after[n, cur_t] = err - errs_before[n, cur_t] = err_before - prev_t = cur_t - skipped_ts[cluster_id.item()] = cluster_skip_ts - - guru.info(f"{skipped_ts=}") - guru.info( - "procrustes init median error: {:.5f} => {:.5f}".format( - np.median(errs_before[errs_before > 0]), - np.median(errs_after[errs_after > 0]), - ) - ) - guru.info( - "procrustes init mean error: {:.5f} => {:.5f}".format( - np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0]) - ) - ) - guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}") - - if vis: - server = get_server(port) - center_idcs = torch.argmin(dists2centers, dim=0) - print(f"{dists2centers.shape=} {center_idcs.shape=}") - vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs]) - vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks") - import ipdb - - ipdb.set_trace() - - bases = MotionBases(init_rots, init_ts) - return bases, motion_coefs, tracks_3d - - -def run_initial_optim( - fg: GaussianParams, - bases: MotionBases, - tracks_3d: TrackObservations, - Ks: torch.Tensor, - w2cs: torch.Tensor, - num_iters: int = 1000, - use_depth_range_loss: bool = False, -): - """ - :param motion_rots: [num_bases, num_frames, 4|6] - :param motion_transls: [num_bases, num_frames, 3] - :param motion_coefs: [num_bases, num_frames] - :param means: [num_gaussians, 3] - """ - optimizer = torch.optim.Adam( - [ - {"params": 
bases.params["rots"], "lr": 1e-2}, - {"params": bases.params["transls"], "lr": 3e-2}, - {"params": fg.params["motion_coefs"], "lr": 1e-2}, - {"params": fg.params["means"], "lr": 1e-3}, - ], - ) - scheduler = torch.optim.lr_scheduler.ExponentialLR( - optimizer, gamma=0.1 ** (1 / num_iters) - ) - G = fg.params.means.shape[0] - num_frames = bases.num_frames - device = bases.params["rots"].device - - w_smooth_func = lambda i, min_v, max_v, th: ( - min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v - ) - - gt_2d, gt_depth = project_2d_tracks( - tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - # (G, T, 2) - gt_2d = gt_2d.swapaxes(0, 1) - # (G, T) - gt_depth = gt_depth.swapaxes(0, 1) - - ts = torch.arange(0, num_frames, device=device) - ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1)) # i (3B,) - - pbar = tqdm(range(0, num_iters)) - for i in pbar: - coefs = fg.get_coefs() - transfms = bases.compute_transforms(ts, coefs) - positions = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. 
- w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: 
ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / (cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, 
first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/loss_utils.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss 
= loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - # acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/metrics.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/metrics.py +++ 
/dev/null @@ -1,313 +0,0 @@ -from typing import Literal - -import numpy as np -import torch -import torch.nn.functional as F -from torchmetrics.functional.image.lpips import _NoTrainLpips -from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure -from torchmetrics.metric import Metric -from torchmetrics.utilities import dim_zero_cat -from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE - - -def compute_psnr( - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, -) -> float: - """ - Args: - preds (torch.Tensor): (..., 3) predicted images in [0, 1]. - targets (torch.Tensor): (..., 3) target images in [0, 1]. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - - Returns: - psnr (float): Peak signal-to-noise ratio. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - return ( - -10.0 - * torch.log( - F.mse_loss( - preds * masks[..., None], - targets * masks[..., None], - reduction="sum", - ) - / masks.sum().clamp(min=1.0) - / 3.0 - ) - / np.log(10.0) - ).item() - - -def compute_pose_errors( - preds: torch.Tensor, targets: torch.Tensor -) -> tuple[float, float, float]: - """ - Args: - preds: (N, 4, 4) predicted camera poses. - targets: (N, 4, 4) target camera poses. - - Returns: - ate (float): Absolute trajectory error. - rpe_t (float): Relative pose error in translation. - rpe_r (float): Relative pose error in rotation (degree). - """ - # Compute ATE. - ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item() - # Compute RPE_t and RPE_r. - # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r. - # torch has numerical issues for acos when the value is close to 1.0, i.e. - # RPE_r is supposed to be very small, and will result in artificially large - # error. - preds = preds.detach().cpu().numpy() - targets = targets.detach().cpu().numpy() - pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:] - target_rels = np.linalg.inv(targets[:-1]) @ targets[1:] - error_rels = np.linalg.inv(target_rels) @ pred_rels - traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1) - rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item() - rpe_r = ( - np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item() - / np.pi - * 180.0 - ) - return ate, rpe_t, rpe_r - - -class mPSNR(PeakSignalNoiseRatio): - sum_squared_error: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__(self, **kwargs) -> None: - super().__init__( - data_range=1.0, - base=10.0, - dim=None, - reduction="elementwise_mean", - **kwargs, - ) - self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (..., 3) float32 predicted images. - targets (torch.Tensor): (..., 3) float32 target images. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account.
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - self.sum_squared_error.append( - torch.sum(torch.pow((preds - targets) * masks[..., None], 2)) - ) - self.total.append(masks.sum().to(torch.int64) * 3) - - def compute(self) -> torch.Tensor: - """Compute peak signal-to-noise ratio over state.""" - sum_squared_error = dim_zero_cat(self.sum_squared_error) - total = dim_zero_cat(self.total) - return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0) - - -class mSSIM(StructuralSimilarityIndexMeasure): - similarity: list - - def __init__(self, **kwargs) -> None: - super().__init__( - reduction=None, - data_range=1.0, - return_full_image=False, - **kwargs, - ) - assert isinstance(self.sigma, float) - - def __len__(self) -> int: - return sum([s.shape[0] for s in self.similarity]) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional binary masks where - the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - - # Construct a 1D Gaussian blur filter. - assert isinstance(self.kernel_size, int) - hw = self.kernel_size // 2 - shift = (2 * hw - self.kernel_size + 1) / 2 - assert isinstance(self.sigma, float) - f_i = ( - (torch.arange(self.kernel_size, device=preds.device) - hw + shift) - / self.sigma - ) ** 2 - filt = torch.exp(-0.5 * f_i) - filt /= torch.sum(filt) - - # Blur in x and y (faster than the 2D convolution). - def convolve2d(z, m, f): - # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf). - z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. 
- # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. 
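-
-         Example (hypothetical numbers): with threshold = 24.0 pixels,
-         a keypoint counts as correct when its L2 error
-         ||pred - target||_2 is below 24.0.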
- """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/params.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new 
- self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] == 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/renderer.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. 
- work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/scene_model.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def __init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return 
self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). 
- target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. 
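- # (Sketch of the downstream use, as in the trainer's
- # _prepare_control_step: after loss.backward(),
- # self._current_xys.grad holds per-gaussian screen-space gradients;
- # retain_grad() is required because means2d is a non-leaf tensor
- # whose .grad would otherwise be freed.)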
- self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/tensor_dataclass.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
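- # (Recursion sketch: map() on e.g. TrackObservations hits the
- # TensorDataclass branch above, which re-enters here with vars(val),
- # a plain dict of fields; each tensor field is then transformed by
- # fn, and non-tensor values fall through unchanged.)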
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/trainer.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
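- # (Illustrative shapes, assuming B = 8 frames with N = 4 targets
- # each: target_ts_vec has 32 entries, compute_poses_all returns
- # (G, 32, 3), the transpose gives (32, G, 3), and split(N) yields
- # 8 chunks of (4, G, 3).)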
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
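- # (Example, hypothetical coordinates: a query track at pixel
- # (x=12, y=7) in batch item i sets masks_flatten[i, 7, 12] = 1.0
- # in the loop below; this only works because the query tracks lie
- # exactly on the pixel grid.)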
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
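- # (Each entry is later written as a TensorBoard scalar by log_dict,
- # e.g. self.writer.add_scalar("train/loss", value, self.global_step).)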
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
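- # (Aggregation sketch: per rendered view, accumulate each visible
- # gaussian's screen-space gradient norm and bump its vis_count; a
- # gaussian visible in, say, 3 of the batched views later averages its
- # accumulated norm over those 3 views in _densify_control_step.)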
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/trajectories.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1) - y = torch.cross(z, x, dim=-1) # (3) - avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center) - avg_w2c = torch.linalg.inv(avg_c2w) - return avg_w2c - - -def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor: - """Triangulate a set of rays to find a single lookat point. - - Args: - origins (torch.Tensor): A (N, 3) array of ray origins. - viewdirs (torch.Tensor): A (N, 3) array of ray view directions. - - Returns: - torch.Tensor: A (3,) lookat point. - """ - - viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1) - eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None] - # Calculate projection matrix I - rr^T - I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :]) - # Compute sum of projections - sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3) - # Solve for the intersection point using least squares - lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0] - # Check NaNs. - assert not torch.any(torch.isnan(lookat)) - return lookat - - -def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor): - """ - Args: - positions: (N, 3) tensor of camera positions - lookat: (3,) tensor of lookat point - up: (3,) tensor of up vector - - Returns: - w2cs: (N, 3, 3) tensor of world to camera rotation matrices - """ - forward_vectors = F.normalize(lookat - positions, dim=-1) - right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1) - down_vectors = F.normalize( - torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1 - ) - Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1) - w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions)) - return w2cs - - -def get_arc_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_position = torch.linalg.inv(ref_w2c)[:3, 3] - thetas = ( - torch.sin( - torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[ - :-1 - ] - ) - * (degree / 2.0) - / 180.0 - * torch.pi - ) - positions = torch.einsum( - "nij,j->ni", - roma.rotvec_to_rotmat(thetas[:, None] * up[None]), - ref_position - lookat, - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_lemniscate_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi) - # Lemniscate curve in camera space. Starting at the origin. - thetas = ( - torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1] - + torch.pi / 2 - ) - positions = torch.stack( - [ - a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2), - a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2), - torch.zeros(num_frames, device=ref_w2c.device), - ], - dim=-1, - ) - # Transform to world space. 
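# F.pad(..., (0, 1), value=1.0) appends a homogeneous 1 to each point, so a
# single einsum with the top three rows of ref_c2w applies the full
# rotation-plus-translation from camera frame into world frame.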
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/transforms.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transfm to be SE3. - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - q (torch.Tensor): (4,), rotation component in quaternion of WXYZ - format. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (torch.Tensor): (), average L2 distance after alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error. 
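# rt_to_mat4(R, t, s) stores the scale in the bottom row as [0, 0, 0, 1/s],
# so the homogeneous divide below multiplies by s and yields
# s * (src @ R.T + t), matching the objective stated in the docstring.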
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/validator.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
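# PCK threshold: a keypoint counts as correct if the prediction lands within
# 5% of the longer image side of the ground-truth location.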
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
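# Concatenating along dim=1 (width) places ground truth and rendering side
# by side in each video frame.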
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
- t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (3, 3). - img = batch["imgs"][0] - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "motion_coefs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__init__.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc deleted file mode 100644 index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__pycache__/render_panel.cpython-310.pyc deleted file mode 100644 index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__pycache__/viewer.cpython-310.pyc deleted file mode 100644 index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000 Binary files a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/playback_panel.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/playback_panel.py deleted file mode 100644 index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/playback_panel.py +++ /dev/null @@ -1,68 +0,0 @@ -import threading -import time - 
-import viser - - -def add_gui_playback_group( - server: viser.ViserServer, - num_frames: int, - min_fps: float = 1.0, - max_fps: float = 60.0, - fps_step: float = 0.1, - initial_fps: float = 10.0, -): - gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/render_panel.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
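# The playback panel above drives its timestep slider from a daemon thread.
# A minimal, viser-free sketch of that pattern follows; the helper names
# (num_frames, get_fps, is_playing, on_frame) are illustrative and not part
# of the original module.
import threading
import time


def start_playback(num_frames, get_fps, is_playing, on_frame):
    """Advance a frame counter in a background thread while is_playing()."""
    state = {"frame": 0}

    def _loop():
        while True:
            if is_playing():
                state["frame"] = (state["frame"] + 1) % num_frames
                on_frame(state["frame"])
            time.sleep(1.0 / get_fps())

    # daemon=True so the thread dies with the interpreter, as in the panel.
    threading.Thread(target=_loop, daemon=True).start()
    return state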
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
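# When keyframe_index is None, a fresh index is drawn from the counter;
# passing an existing index re-creates the frustum under the same scene
# path, so add_camera also serves as the in-place update used by the
# override-FOV callbacks below.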
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
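# T_current_target.log() is the twist (se(3) vector) from the current pose
# to a point slightly behind the keyframe (-0.5 along its local z); scaling
# it by j/9 and re-exponentiating moves the camera along a constant-twist
# screw path in 10 steps at roughly 30 Hz.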
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
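# PCHIP interpolation is monotone, so seconds map to spline indices without
# overshooting between keyframes; the clip only guards the tiny numerical
# over/undershoot that can occur at the endpoints.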
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
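# The visualized path (spline line plus colored sample points) is rebuilt
# from scratch on every update; removing the old handles first keeps
# repeated edits from accumulating stale scene nodes.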
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
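# The keyframe records the current scene timestep when a timestep slider
# exists, so camera keyframes stay aligned with dynamic-scene time;
# otherwise the time defaults to 0.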
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
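# The helper exists to bind each (keyframe, controls) pair at call time:
# registering the closure directly inside the loop below would capture the
# loop variables by reference, and every handler would move only the last
# keyframe (Python's late-binding closures).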
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
- Returns current camera pose + FOV if available.""" - - if preview_frame_slider is None: - return - maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad( - preview_frame_slider.value / get_max_frame_index() - ) - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - render_tab_state.preview_fov = fov_rad - render_tab_state.preview_aspect = camera_path.get_aspect() - render_tab_state.preview_camera_type = camera_type.value - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - return pose, fov_rad, time - - def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]: - """Helper for creating the current frame # slider. This is removed and - re-added anytime the `max` value changes.""" - - with playback_folder: - preview_frame_slider = server.gui.add_slider( - "Preview frame", - min=0, - max=get_max_frame_index(), - step=1, - initial_value=0, - # Place right after the pause button. - order=preview_render_stop_button.order + 0.01, - disabled=get_max_frame_index() == 1, - ) - play_button.disabled = preview_frame_slider.disabled - preview_render_button.disabled = preview_frame_slider.disabled - - @preview_frame_slider.on_update - def _(_) -> None: - nonlocal preview_camera_handle - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - - preview_camera_handle = server.scene.add_camera_frustum( - "/preview_camera", - fov=fov_rad, - aspect=resolution.value[0] / resolution.value[1], - scale=0.35, - wxyz=pose.rotation().wxyz, - position=pose.translation(), - color=(10, 200, 30), - ) - if render_tab_state.preview_render: - for client in server.get_clients().values(): - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - return preview_frame_slider - - # We back up the camera poses before and after we start previewing renders. - camera_pose_backup_from_id: Dict[int, tuple] = {} - - @preview_render_button.on_click - def _(_) -> None: - render_tab_state.preview_render = True - preview_render_button.visible = False - preview_render_stop_button.visible = True - - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov, time = maybe_pose_and_fov_rad_and_time - del fov - - # Hide all scene nodes when we're previewing the render. - server.scene.set_global_visibility(True) - - # Back up and then set camera poses. - for client in server.get_clients().values(): - camera_pose_backup_from_id[client.client_id] = ( - client.camera.position, - client.camera.look_at, - client.camera.up_direction, - ) - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - @preview_render_stop_button.on_click - def _(_) -> None: - render_tab_state.preview_render = False - preview_render_button.visible = True - preview_render_stop_button.visible = False - - # Revert camera poses. 
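# pop() restores and clears each saved pose in one step, so every client is
# reverted at most once; clients that connected during the preview have no
# backup and are skipped.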
-        for client in server.get_clients().values():
-            if client.client_id not in camera_pose_backup_from_id:
-                continue
-            cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop(
-                client.client_id
-            )
-            client.camera.position = cam_position
-            client.camera.look_at = cam_look_at
-            client.camera.up_direction = cam_up
-            client.flush()
-
-        # Un-hide scene nodes.
-        server.scene.set_global_visibility(True)
-
-    preview_frame_slider = add_preview_frame_slider()
-
-    # Update the number of frames when the duration or framerate changes.
-    @duration_number.on_update
-    @framerate_number.on_update
-    def _(_) -> None:
-        remove_preview_camera()  # Will be re-added when slider is updated.
-
-        nonlocal preview_frame_slider
-        old = preview_frame_slider
-        assert old is not None
-
-        preview_frame_slider = add_preview_frame_slider()
-        if preview_frame_slider is not None:
-            old.remove()
-        else:
-            preview_frame_slider = old
-
-        camera_path.framerate = framerate_number.value
-        camera_path.update_spline()
-
-    # Play the camera trajectory when the play button is pressed.
-    @play_button.on_click
-    def _(_) -> None:
-        play_button.visible = False
-        pause_button.visible = True
-
-        def play() -> None:
-            while not play_button.visible:
-                max_frame = int(framerate_number.value * duration_number.value)
-                if max_frame > 0:
-                    assert preview_frame_slider is not None
-                    preview_frame_slider.value = (
-                        preview_frame_slider.value + 1
-                    ) % max_frame
-                time.sleep(1.0 / framerate_number.value)
-
-        threading.Thread(target=play).start()
-
-    # Pause the camera trajectory when the pause button is pressed.
-    @pause_button.on_click
-    def _(_) -> None:
-        play_button.visible = True
-        pause_button.visible = False
-
-    # Add a button for loading an existing camera path.
-    load_camera_path_button = server.gui.add_button(
-        "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path."
-    )
-
-    @load_camera_path_button.on_click
-    def _(event: viser.GuiEvent) -> None:
-        assert event.client is not None
-        camera_path_dir = datapath.parent
-        camera_path_dir.mkdir(parents=True, exist_ok=True)
-        preexisting_camera_paths = list(camera_path_dir.glob("*.json"))
-        preexisting_camera_filenames = [p.name for p in preexisting_camera_paths]
-
-        with event.client.gui.add_modal("Load Path") as modal:
-            if len(preexisting_camera_filenames) == 0:
-                event.client.gui.add_markdown("No existing paths found")
-            else:
-                event.client.gui.add_markdown("Select existing camera path:")
-                camera_path_dropdown = event.client.gui.add_dropdown(
-                    label="Camera Path",
-                    options=[str(p) for p in preexisting_camera_filenames],
-                    initial_value=str(preexisting_camera_filenames[0]),
-                )
-                load_button = event.client.gui.add_button("Load")
-
-                @load_button.on_click
-                def _(_) -> None:
-                    # Load the json file.
-                    json_path = datapath / camera_path_dropdown.value
-                    with open(json_path, "r") as f:
-                        json_data = json.load(f)
-
-                    keyframes = json_data["keyframes"]
-                    camera_path.reset()
-                    for i in range(len(keyframes)):
-                        frame = keyframes[i]
-                        pose = tf.SE3.from_matrix(
-                            np.array(frame["matrix"]).reshape(4, 4)
-                        )
-                        # Apply the x rotation by 180 deg.
-                        pose = tf.SE3.from_rotation_and_translation(
-                            pose.rotation() @ tf.SO3.from_x_radians(np.pi),
-                            pose.translation(),
-                        )
-
-                        camera_path.add_camera(
-                            Keyframe(
-                                frame["time"],
-                                position=pose.translation(),
-                                wxyz=pose.rotation().wxyz,
-                                # There are some floating point conversions between
-                                # degrees and radians, so the fov and default_fov
-                                # values will not be exactly matched.
-                                override_fov_enabled=abs(
-                                    frame["fov"] - json_data.get("default_fov", 0.0)
-                                )
-                                > 1e-3,
-                                override_fov_rad=frame["fov"] / 180.0 * np.pi,
-                                aspect=frame["aspect"],
-                                override_transition_enabled=frame.get(
-                                    "override_transition_enabled", None
-                                ),
-                                override_transition_sec=frame.get(
-                                    "override_transition_sec", None
-                                ),
-                            )
-                        )
-
-                    transition_sec_number.value = json_data.get(
-                        "default_transition_sec", 0.5
-                    )
-
-                    # Update the render name.
-                    camera_path_name.value = json_path.stem
-                    camera_path.update_spline()
-                    modal.close()
-
-            cancel_button = event.client.gui.add_button("Cancel")
-
-            @cancel_button.on_click
-            def _(_) -> None:
-                modal.close()
-
-    # Set the initial value to the current date-time string.
-    now = datetime.datetime.now()
-    camera_path_name = server.gui.add_text(
-        "Camera path name",
-        initial_value=now.strftime("%Y-%m-%d %H:%M:%S"),
-        hint="Name of the render",
-    )
-
-    save_path_button = server.gui.add_button(
-        "Save Camera Path",
-        color="green",
-        icon=viser.Icon.FILE_EXPORT,
-        hint="Save the camera path to json.",
-    )
-
-    reset_up_button = server.gui.add_button(
-        "Reset Up Direction",
-        icon=viser.Icon.ARROW_BIG_UP_LINES,
-        color="gray",
-        hint="Set the up direction of the camera orbit controls to the camera's current up direction.",
-    )
-
-    @reset_up_button.on_click
-    def _(event: viser.GuiEvent) -> None:
-        assert event.client is not None
-        event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array(
-            [0.0, -1.0, 0.0]
-        )
-
-    @save_path_button.on_click
-    def _(event: viser.GuiEvent) -> None:
-        assert event.client is not None
-        num_frames = int(framerate_number.value * duration_number.value)
-        json_data = {}
-        # json_data has the properties:
-        #     keyframes: list of keyframes with
-        #         matrix: flattened 4x4 matrix
-        #         fov: float in degrees
-        #         aspect: float
-        #     camera_type: string of camera type
-        #     render_height: int
-        #     render_width: int
-        #     fps: int
-        #     seconds: float
-        #     is_cycle: bool
-        #     smoothness_value: float
-        #     camera_path: list of frames with properties
-        #         camera_to_world: flattened 4x4 matrix
-        #         fov: float in degrees
-        #         aspect: float
-        # First populate the keyframes:
-        keyframes = []
-        for keyframe, dummy in camera_path._keyframes.values():
-            pose = tf.SE3.from_rotation_and_translation(
-                tf.SO3(keyframe.wxyz), keyframe.position
-            )
-            keyframes.append(
-                {
-                    "matrix": pose.as_matrix().flatten().tolist(),
-                    "fov": (
-                        np.rad2deg(keyframe.override_fov_rad)
-                        if keyframe.override_fov_enabled
-                        else fov_degrees.value
-                    ),
-                    "aspect": keyframe.aspect,
-                    "override_transition_enabled": keyframe.override_transition_enabled,
-                    "override_transition_sec": keyframe.override_transition_sec,
-                }
-            )
-        json_data["default_fov"] = fov_degrees.value
-        json_data["default_transition_sec"] = transition_sec_number.value
-        json_data["keyframes"] = keyframes
-        json_data["camera_type"] = camera_type.value.lower()
-        json_data["render_height"] = resolution.value[1]
-        json_data["render_width"] = resolution.value[0]
-        json_data["fps"] = framerate_number.value
-        json_data["seconds"] = duration_number.value
-        json_data["is_cycle"] = loop.value
-        json_data["smoothness_value"] = tension_slider.value
-
-        def get_intrinsics(W, H, fov):
-            focal = 0.5 * H / np.tan(0.5 * fov)
-            return np.array(
-                [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]]
-            )
-
-        # Now populate the camera path:
-        camera_path_list = []
-        for i in range(num_frames):
-            maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad(
-                i / num_frames
-            )
-            if maybe_pose_and_fov_and_time is None:
-                return
-            pose, fov, time = maybe_pose_and_fov_and_time
-            H = resolution.value[1]
-            W = resolution.value[0]
-            K = get_intrinsics(W, H, fov)
-            # NOTE: no extra 180-degree x-axis rotation is applied here; the
-            # loader above already bakes it into the keyframe poses.
-            w2c = pose.inverse().as_matrix()
-            camera_path_list.append(
-                {
-                    "time": time,
-                    "w2c": w2c.flatten().tolist(),
-                    "K": K.flatten().tolist(),
-                    "img_wh": (W, H),
-                }
-            )
-        json_data["camera_path"] = camera_path_list
-
-        # Now write the json file.
-        out_name = camera_path_name.value
-        json_outfile = datapath / f"{out_name}.json"
-        datapath.mkdir(parents=True, exist_ok=True)
-        print(f"writing to {json_outfile}")
-        with open(json_outfile.absolute(), "w") as outfile:
-            json.dump(json_data, outfile)
-
-    camera_path = CameraPath(server, duration_number)
-    camera_path.default_fov = fov_degrees.value / 180.0 * np.pi
-    camera_path.default_transition_sec = transition_sec_number.value
-
-    return render_tab_state
-
-
-if __name__ == "__main__":
-    populate_render_tab(
-        server=viser.ViserServer(),
-        datapath=Path("."),
-        gui_timestep_handle=None,
-    )
-    while True:
-        time.sleep(10.0)
diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/utils.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/utils.py
deleted file mode 100644
index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000
--- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/utils.py
+++ /dev/null
@@ -1,544 +0,0 @@
-import colorsys
-from typing import cast
-
-import cv2
-import numpy as np
-
-# import nvdiffrast.torch as dr
-import torch
-import torch.nn.functional as F
-from matplotlib import colormaps
-from viser import ViserServer
-
-
-class Singleton(type):
-    _instances = {}
-
-    def __call__(cls, *args, **kwargs):
-        if cls not in cls._instances:
-            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
-        return cls._instances[cls]
-
-
-class VisManager(metaclass=Singleton):
-    _servers = {}
-
-
-def get_server(port: int | None = None) -> ViserServer:
-    manager = VisManager()
-    if port is None:
-        avail_ports = list(manager._servers.keys())
-        port = avail_ports[0] if len(avail_ports) > 0 else 8890
-    if port not in manager._servers:
-        manager._servers[port] = ViserServer(port=port, verbose=False)
-    return manager._servers[port]
-
-
-def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False):
-    """
-    :param tracks_3d_w (torch.Tensor): (T, N, 3)
-    :param Ks (torch.Tensor): (T, 3, 3)
-    :param T_cw (torch.Tensor): (T, 4, 4)
-    :returns tracks_2d (torch.Tensor): (T, N, 2)
-    """
-    tracks_3d_c = torch.einsum(
-        "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1)
-    )[..., :3]
-    tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c)
-    if return_depth:
-        return (
-            tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5),
-            tracks_3d_v[..., 2],
-        )
-    return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5)
-
-
-def draw_keypoints_video(
-    imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3
-):
-    """
-    :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255]
-    :param kps (np.ndarray): (N, T, 2)
-    :param colors (np.ndarray): (N, 3) float [0, 1]
-    :param occs (np.ndarray): (N, T) bool
-    :returns out_frames (T, H, W, 3)
-    """
-    if colors is None:
-        label = np.linspace(0, 1, kps.shape[0])
-        colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3]
-    out_frames = []
-    for t in range(len(imgs)):
-        occ = occs[:, t] if occs is not None else None
-        vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, occ, radius=radius)
-        out_frames.append(vis)
-    return out_frames
-
-
-def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3):
-    """
-    :param img (H, W, 3)
-    :param kps (N, 2)
-    :param occs (N)
-    :param colors (N, 3) from 0 to 1
-    """
-    out_img = img.copy()
-    kps = kps.round().astype("int").tolist()
-    if colors is not None:
-        colors = (255 * colors).astype("int").tolist()
-    for n in range(len(kps)):
-        kp = kps[n]
-        color = colors[n] if colors is not None else (255, 0, 0)
-        thickness = -1 if occs is None or occs[n] == 0 else 1
-        out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA)
-    return out_img
-
-
-def draw_tracks_2d(
-    img: torch.Tensor,
-    tracks_2d: torch.Tensor,
-    track_point_size: int = 2,
-    track_line_width: int = 1,
-    cmap_name: str = "gist_rainbow",
-):
-    cmap = colormaps.get_cmap(cmap_name)
-    # (H, W, 3).
-    img_np = (img.cpu().numpy() * 255.0).astype(np.uint8)
-    # (P, N, 2).
-    tracks_2d_np = tracks_2d.cpu().numpy()
-
-    num_tracks, num_frames = tracks_2d_np.shape[:2]
-
-    canvas = img_np.copy()
-    for i in range(num_frames - 1):
-        alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1)
-        img_curr = canvas.copy()
-        for j in range(num_tracks):
-            color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255)
-            color_alpha = 1
-            hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2])
-            color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2])
-            pt1 = tracks_2d_np[j, i]
-            pt2 = tracks_2d_np[j, i + 1]
-            p1 = (int(round(pt1[0])), int(round(pt1[1])))
-            p2 = (int(round(pt2[0])), int(round(pt2[1])))
-            img_curr = cv2.line(
-                img_curr,
-                p1,
-                p2,
-                color,
-                thickness=track_line_width,
-                lineType=cv2.LINE_AA,
-            )
-        canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0)
-
-    for j in range(num_tracks):
-        color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255)
-        pt = tracks_2d_np[j, -1]
-        pt = (int(round(pt[0])), int(round(pt[1])))
-        canvas = cv2.circle(
-            canvas,
-            pt,
-            track_point_size,
-            color,
-            thickness=-1,
-            lineType=cv2.LINE_AA,
-        )
-
-    return canvas
-
-
-def generate_line_verts_faces(starts, ends, line_width):
-    """
-    Args:
-        starts: (P, N, 2).
-        ends: (P, N, 2).
-        line_width: int.
-
-    Returns:
-        verts: (P * N * 4, 2).
-        faces: (P * N * 2, 3).
-    """
-    P, N, _ = starts.shape
-
-    directions = F.normalize(ends - starts, dim=-1)
-    deltas = (
-        torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1)
-        * line_width
-        / 2.0
-    )
-    v0 = starts + deltas
-    v1 = starts - deltas
-    v2 = ends + deltas
-    v3 = ends - deltas
-    verts = torch.stack([v0, v1, v2, v3], dim=-2)
-    verts = verts.reshape(-1, 2)
-
-    faces = []
-    for p in range(P):
-        for n in range(N):
-            base_index = p * N * 4 + n * 4
-            # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3).
-            faces.append([base_index, base_index + 1, base_index + 2])
-            faces.append([base_index + 2, base_index + 1, base_index + 3])
-    faces = torch.as_tensor(faces, device=starts.device)
-
-    return verts, faces
-
-
-def generate_point_verts_faces(points, point_size, num_segments=10):
-    """
-    Args:
-        points: (P, 2).
-        point_size: int.
-        num_segments: int.
-
-    Returns:
-        verts: (P * (num_segments + 1), 2).
-        faces: (P * num_segments, 3).
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
-    verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs)
-    faces_int32 = faces.to(torch.int32)
-
-    rast, _ = cast(
-        tuple,
-        dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])),
-    )
-    rgba = cast(
-        torch.Tensor,
-        dr.interpolate(
-            torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(),
-            rast,
-            faces_int32,
-        ),
-    )[0]
-    rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[
-        0
-    ].clamp(0, 1)
-    # Compose.
-    color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img
-
-    # Unpad.
-    color = color[:H, :W]
-
-    return (color.cpu().numpy() * 255.0).astype(np.uint8)
-
-
-def make_video_divisble(
-    video: torch.Tensor | np.ndarray, block_size=16
-) -> torch.Tensor | np.ndarray:
-    H, W = video.shape[1:3]
-    H_new = H - H % block_size
-    W_new = W - W % block_size
-    return video[:, :H_new, :W_new]
-
-
-def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor:
-    """Convert single channel to a color img.
-
-    Args:
-        img (torch.Tensor): (..., 1) float32 single channel image.
-        colormap (str): Colormap for img.
-
-    Returns:
-        (..., 3) colored img with colors in [0, 1].
-    """
-    img = torch.nan_to_num(img, 0)
-    if colormap == "gray":
-        return img.repeat(1, 1, 3)
-    img_long = (img * 255).long()
-    img_long_min = torch.min(img_long)
-    img_long_max = torch.max(img_long)
-    assert img_long_min >= 0, f"the min value is {img_long_min}"
-    assert img_long_max <= 255, f"the max value is {img_long_max}"
-    return torch.tensor(
-        colormaps[colormap].colors,  # type: ignore
-        device=img.device,
-    )[img_long[..., 0]]
-
-
-def apply_depth_colormap(
-    depth: torch.Tensor,
-    acc: torch.Tensor | None = None,
-    near_plane: float | None = None,
-    far_plane: float | None = None,
-) -> torch.Tensor:
-    """Converts a depth image to color for easier analysis.
-
-    Args:
-        depth (torch.Tensor): (..., 1) float32 depth.
-        acc (torch.Tensor | None): (..., 1) optional accumulation mask.
-        near_plane: Closest depth to consider. If None, use min image value.
-        far_plane: Furthest depth to consider. If None, use max image value.
-
-    Returns:
-        (..., 3) colored depth image with colors in [0, 1].
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/viewer.py b/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/horsejump-high/code/2024-10-26-095302/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/horsejump-high/code/2024-10-26-095302/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/horsejump-high/code/2024-10-26-095302/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/horsejump-high/code/2024-10-26-095302/scripts/evaluate_iphone.py b/som_out/horsejump-high/code/2024-10-26-095302/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- 
a/som_out/horsejump-high/code/2024-10-26-095302/scripts/evaluate_iphone.py +++ /dev/null @@ -1,447 +0,0 @@ -import argparse -import json -import os.path as osp -from glob import glob -from itertools import product - -import cv2 -import imageio.v3 as iio -import numpy as np -import roma -import torch -from tqdm import tqdm - -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.metrics import mLPIPS, mPSNR, mSSIM -from flow3d.transforms import rt_to_mat4, solve_procrustes - -parser = argparse.ArgumentParser() -parser.add_argument( - "--data_dir", - type=str, - help="Path to the data directory that contains all the sequences.", -) -parser.add_argument( - "--result_dir", - type=str, - help="Path to the result directory that contains the results." - "for batch evaluation, result_dir should contain subdirectories for each sequence. (result_dir/seq_name/results)" - "for single sequence evaluation, result_dir should contain results directly (result_dir/results)", -) -parser.add_argument( - "--seq_names", - type=str, - nargs="+", - default=[ - "apple", - "backpack", - "block", - "creeper", - "handwavy", - "haru-sit", - "mochi-high-five", - "paper-windmill", - "pillow", - "spin", - "sriracha-tree", - "teddy", - ], - help="Sequence names to evaluate.", -) -args = parser.parse_args() - - -def load_data_dict(data_dir, train_names, val_names): - val_imgs = np.array( - [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names] - ) - val_covisibles = np.array( - [ - iio.imread( - osp.join( - data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png" - ) - ) - for name in tqdm(val_names, desc="Loading val covisibles") - ] - ) - train_depths = np.array( - [ - np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0] - for name in train_names - ] - ) - train_Ks, train_w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [name + ".png" for name in train_names], - ) - train_Ks = train_Ks[:, :3, :3] - scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item() - train_c2ws = np.linalg.inv(train_w2cs) - train_c2ws[:, :3, -1] *= scale - train_w2cs = np.linalg.inv(train_c2ws) - keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json"))) - keypoints_2d = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints_2d.append(json.load(f)) - keypoints_2d = np.array(keypoints_2d) - keypoints_2d[..., :2] *= 2.0 - time_ids = np.array( - [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths] - ) - time_pairs = np.array(list(product(time_ids, repeat=2))) - index_pairs = np.array(list(product(range(len(time_ids)), repeat=2))) - keypoints_3d = [] - for i, kps_2d in zip(time_ids, keypoints_2d): - K = train_Ks[i] - w2c = train_w2cs[i] - depth = train_depths[i] - is_kp_visible = kps_2d[:, 2] == 1 - is_depth_valid = ( - cv2.remap( - (depth != 0).astype(np.float32), - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - == 1 - ) - kp_depths = cv2.remap( - depth, # type: ignore - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - kps_3d = ( - np.einsum( - "ij,pj->pi", - np.linalg.inv(K), - np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1), - ) - * kp_depths[:, None] - ) - kps_3d = np.einsum( - "ij,pj->pi", - np.linalg.inv(w2c)[:3], - np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1), - ) - kps_3d 
= np.concatenate( - [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1 - ) - kps_3d[kps_3d[:, -1] != 1] = 0.0 - keypoints_3d.append(kps_3d) - keypoints_3d = np.array(keypoints_3d) - return { - "val_imgs": val_imgs, - "val_covisibles": val_covisibles, - "train_depths": train_depths, - "train_Ks": train_Ks, - "train_w2cs": train_w2cs, - "keypoints_2d": keypoints_2d, - "keypoints_3d": keypoints_3d, - "time_ids": time_ids, - "time_pairs": time_pairs, - "index_pairs": index_pairs, - } - - -def load_result_dict(result_dir, val_names): - try: - pred_val_imgs = np.array( - [ - iio.imread(osp.join(result_dir, "rgb", f"{name}.png")) - for name in val_names - ] - ) - except: - pred_val_imgs = None - try: - keypoints_dict = np.load( - osp.join(result_dir, "keypoints.npz"), allow_pickle=True - ) - if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict: - keypoints_dict = keypoints_dict["arr_0"].item() - pred_keypoint_Ks = keypoints_dict["Ks"] - pred_keypoint_w2cs = keypoints_dict["w2cs"] - pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"] - pred_train_depths = keypoints_dict["pred_train_depths"] - except: - print( - "No keypoints.npz found, make sure that it's the method itself cannot produce keypoints." - ) - keypoints_dict = {} - pred_keypoint_Ks = None - pred_keypoint_w2cs = None - pred_keypoints_3d = None - pred_train_depths = None - - if "visibilities" in list(keypoints_dict.keys()): - pred_visibilities = keypoints_dict["visibilities"] - else: - pred_visibilities = None - - return { - "pred_val_imgs": pred_val_imgs, - "pred_train_depths": pred_train_depths, - "pred_keypoint_Ks": pred_keypoint_Ks, - "pred_keypoint_w2cs": pred_keypoint_w2cs, - "pred_keypoints_3d": pred_keypoints_3d, - "pred_visibilities": pred_visibilities, - } - - -def evaluate_3d_tracking(data_dict, result_dict): - train_Ks = data_dict["train_Ks"] - train_w2cs = data_dict["train_w2cs"] - keypoints_3d = data_dict["keypoints_3d"] - time_ids = data_dict["time_ids"] - time_pairs = data_dict["time_pairs"] - index_pairs = data_dict["index_pairs"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"] - if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks): - print("Inconsistent camera intrinsics.") - print(train_Ks[time_ids][0], pred_keypoint_Ks[0]) - keypoint_w2cs = train_w2cs[time_ids] - q, t, s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0] - R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1)) - pred_keypoints_3d = np.einsum( - "ij,...j->...i", - rt_to_mat4(R, t, s).numpy().astype(np.float64), - np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1), - ) - pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:] - # Compute 3D tracking metrics. 
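-    # For every (query, target) frame pair, EPE averages the distance between
-    # predicted and ground-truth keypoints over the keypoints covisible in both
-    # frames; the 0.1 / 0.05 PCK thresholds assume metric (meter) units after
-    # the Procrustes alignment above.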
-    pair_keypoints_3d = keypoints_3d[index_pairs]
-    is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1)
-    target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3]
-    epes = []
-    for i in range(len(time_pairs)):
-        epes.append(
-            np.linalg.norm(
-                target_keypoints_3d[i][is_covisible[i]]
-                - pred_keypoints_3d[i][is_covisible[i]],
-                axis=-1,
-            )
-        )
-    epe = np.mean(
-        [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0]
-    ).item()
-    pck_3d_10cm = np.mean(
-        [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0]
-    ).item()
-    pck_3d_5cm = np.mean(
-        [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0]
-    ).item()
-    print(f"3D tracking EPE: {epe:.4f}")
-    print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}")
-    print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}")
-    print("-----------------------------")
-    return epe, pck_3d_10cm, pck_3d_5cm
-
-
-def project(Ks, w2cs, pts):
-    """
-    Args:
-        Ks: (N, 3, 3) camera intrinsics.
-        w2cs: (N, 4, 4) camera extrinsics.
-        pts: (N, N, M, 3) 3D points.
-    """
-    N = Ks.shape[0]
-    pts = pts.swapaxes(0, 1).reshape(N, -1, 3)
-
-    pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1)
-
-    # Apply world-to-camera transformation.
-    pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes(
-        1, 2
-    )
-    # Project to image plane using intrinsic parameters.
-    projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2)
-
-    depths = projected_pts[..., 2:3]
-    # Normalize homogeneous coordinates.
-    projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None)
-    projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1)
-    depths = depths.reshape(N, N, -1).swapaxes(0, 1)
-    return projected_pts, depths
-
-
-def evaluate_2d_tracking(data_dict, result_dict):
-    train_w2cs = data_dict["train_w2cs"]
-    keypoints_2d = data_dict["keypoints_2d"]
-    visibilities = keypoints_2d[..., -1].astype(np.bool_)
-    time_ids = data_dict["time_ids"]
-    num_frames = len(time_ids)
-    num_pts = keypoints_2d.shape[1]
-    pred_train_depths = result_dict["pred_train_depths"]
-    pred_keypoint_Ks = result_dict["pred_keypoint_Ks"]
-    pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"]
-    pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape(
-        num_frames, -1, num_pts, 3
-    )
-    keypoint_w2cs = train_w2cs[time_ids]
-    s = solve_procrustes(
-        torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to(
-            torch.float32
-        ),
-        torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32),
-    )[0][-1].item()
-
-    target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2]
-    target_visibilities = visibilities[None].repeat(num_frames, axis=0)
-
-    pred_points, pred_depths = project(
-        pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d
-    )
-    if result_dict["pred_visibilities"] is not None:
-        pred_visibilities = result_dict["pred_visibilities"].reshape(
-            num_frames, -1, num_pts
-        )
-    else:
-        rendered_depths = []
-        for i, points in zip(
-            data_dict["index_pairs"][:, -1],
-            pred_points.reshape(-1, pred_points.shape[2], 2),
-        ):
-            rendered_depths.append(
-                cv2.remap(
-                    pred_train_depths[i].astype(np.float32),
-                    points[None].astype(np.float32),  # type: ignore
-                    None,  # type: ignore
-                    cv2.INTER_LINEAR,
-                    borderMode=cv2.BORDER_CONSTANT,
-                )[0]
-            )
-        rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts)
-        pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05
-
-    one_hot_eye = np.eye(target_points.shape[0])[..., None].repeat(num_pts, axis=-1)
-    evaluation_points = one_hot_eye == 0
-    for i in range(num_frames):
-        evaluation_points[i, :, ~visibilities[i]] = False
-    occ_acc = np.sum(
-        np.equal(pred_visibilities, target_visibilities) & evaluation_points
-    ) / np.sum(evaluation_points)
-    all_frac_within = []
-    all_jaccard = []
-
-    for thresh in [4, 8, 16, 32, 64]:
-        within_dist = np.sum(
-            np.square(pred_points - target_points),
-            axis=-1,
-        ) < np.square(thresh)
-        is_correct = np.logical_and(within_dist, target_visibilities)
-        count_correct = np.sum(is_correct & evaluation_points)
-        count_visible_points = np.sum(target_visibilities & evaluation_points)
-        frac_correct = count_correct / count_visible_points
-        all_frac_within.append(frac_correct)
-
-        true_positives = np.sum(is_correct & pred_visibilities & evaluation_points)
-        gt_positives = np.sum(target_visibilities & evaluation_points)
-        false_positives = (~target_visibilities) & pred_visibilities
-        false_positives = false_positives | ((~within_dist) & pred_visibilities)
-        false_positives = np.sum(false_positives & evaluation_points)
-        jaccard = true_positives / (gt_positives + false_positives)
-        all_jaccard.append(jaccard)
-    AJ = np.mean(all_jaccard)
-    APCK = np.mean(all_frac_within)
-
-    print(f"2D tracking AJ: {AJ:.4f}")
-    print(f"2D tracking avg PCK: {APCK:.4f}")
-    print(f"2D tracking occlusion accuracy: {occ_acc:.4f}")
-    print("-----------------------------")
-    return AJ, APCK, occ_acc
-
-
-def evaluate_nv(data_dict, result_dict):
-    device = "cuda"
-    psnr_metric = mPSNR().to(device)
-    ssim_metric = mSSIM().to(device)
-    lpips_metric = mLPIPS().to(device)
-
-    val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device)
-    val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device)
-    pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device)
-
-    for i in range(len(val_imgs)):
-        val_img = val_imgs[i] / 255.0
-        pred_val_img = pred_val_imgs[i] / 255.0
-        val_covisible = val_covisibles[i] / 255.0
-        psnr_metric.update(val_img, pred_val_img, val_covisible)
-        ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None])
-        lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None])
-    mpsnr = psnr_metric.compute().item()
-    mssim = ssim_metric.compute().item()
-    mlpips = lpips_metric.compute().item()
-    print(f"NV mPSNR: {mpsnr:.4f}")
-    print(f"NV mSSIM: {mssim:.4f}")
-    print(f"NV mLPIPS: {mlpips:.4f}")
-    return mpsnr, mssim, mlpips
-
-
-if __name__ == "__main__":
-    seq_names = args.seq_names
-
-    epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], []
-    AJ_all, APCK_all, occ_acc_all = [], [], []
-    mpsnr_all, mssim_all, mlpips_all = [], [], []
-
-    for seq_name in seq_names:
-        print("=========================================")
-        print(f"Evaluating {seq_name}")
-        print("=========================================")
-        data_dir = osp.join(args.data_dir, seq_name)
-        if not osp.exists(data_dir):
-            data_dir = args.data_dir
-        if not osp.exists(data_dir):
-            raise ValueError(f"Data directory {data_dir} not found.")
-        result_dir = osp.join(args.result_dir, seq_name, "results/")
-        if not osp.exists(result_dir):
-            result_dir = osp.join(args.result_dir, "results/")
-        if not osp.exists(result_dir):
-            raise ValueError(f"Result directory {result_dir} not found.")
-
-        with open(osp.join(data_dir, "splits/train.json")) as f:
-            train_names = json.load(f)["frame_names"]
-        with open(osp.join(data_dir, "splits/val.json")) as f:
-            val_names = json.load(f)["frame_names"]
-
-        data_dict = load_data_dict(data_dir, train_names, val_names)
-        result_dict = load_result_dict(result_dir, val_names)
-        if result_dict["pred_keypoints_3d"] is not None:
-            epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict)
-            AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict)
-            epe_all.append(epe)
-            pck_3d_10cm_all.append(pck_3d_10cm)
-            pck_3d_5cm_all.append(pck_3d_5cm)
-            AJ_all.append(AJ)
-            APCK_all.append(APCK)
-            occ_acc_all.append(occ_acc)
-        if len(data_dict["val_imgs"]) > 0:
-            if result_dict["pred_val_imgs"] is None:
-                print("No NV results found.")
-                continue
-            mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict)
-            mpsnr_all.append(mpsnr)
-            mssim_all.append(mssim)
-            mlpips_all.append(mlpips)
-
-    print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}")
-    print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}")
-    print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}")
-    print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}")
-    print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}")
-    print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}")
-    print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}")
-    print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}")
-    print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}")
diff --git a/som_out/horsejump-high/events.out.tfevents.1729876637.cvsv00-140.1390067.0 b/som_out/horsejump-high/events.out.tfevents.1729876637.cvsv00-140.1390067.0
deleted file mode 100644
index e0d45ba7b267b50368210f2dcb31da0f1fc0ca47..0000000000000000000000000000000000000000
--- a/som_out/horsejump-high/events.out.tfevents.1729876637.cvsv00-140.1390067.0
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5aadabdf7e0058398bbb6205272ca9554581c17f00a3e9039bee37448faf03e4
-size 184509
diff --git a/som_out/horsejump-high/events.out.tfevents.1729878476.cvsv00-140.1454728.0 b/som_out/horsejump-high/events.out.tfevents.1729878476.cvsv00-140.1454728.0
deleted file mode 100644
index 61e45ed0f45de0f84f13e4b9197eb2078a1b5a88..0000000000000000000000000000000000000000
--- a/som_out/horsejump-high/events.out.tfevents.1729878476.cvsv00-140.1454728.0
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f71db5626a7caf2fa73b66928525cd63b28e2690f2a489c76b4fd317fcdf7876
-size 3382378
diff --git a/som_out/horsejump-high/events.out.tfevents.1729907587.cvsv00-140.2315804.0 b/som_out/horsejump-high/events.out.tfevents.1729907587.cvsv00-140.2315804.0
deleted file mode 100644
index 04f76b10ebd19c119768c9eb54830f16696920b0..0000000000000000000000000000000000000000
--- a/som_out/horsejump-high/events.out.tfevents.1729907587.cvsv00-140.2315804.0
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c0770f75f4b44b784eb61e62ac455762077cb754154c0975f765132a7aa4825d
-size 88
diff --git a/som_out/swing/cfg.yaml b/som_out/swing/cfg.yaml
deleted file mode 100644
index fc8dd7eced0ab52b16f88b2f12c43d90e9431174..0000000000000000000000000000000000000000
--- a/som_out/swing/cfg.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-batch_size: 8
-data:
-  camera_type: droid_recon
-  depth_type: aligned_depth_anything
-  end: -1
-  image_type: JPEGImages
-  load_from_cache: false
-  mask_erosion_radius: 3
-  mask_type: Annotations
-  num_targets_per_frame: 4
-  res: 480p
-  root_dir: SOM_data_lcx/Davis_Data
-  scene_norm_dict: null
-  seq_name: swing
-  start: 0
-  track_2d_type: bootstapir
-loss:
-  w_depth_const: 0.1
-  w_depth_grad: 1
-  w_depth_reg: 0.5
-  w_mask: 1.0
-  w_rgb: 1.0
-  w_scale_var: 0.01
-  w_smooth_bases: 0.1
-  w_smooth_tracks: 2.0
-  w_track: 2.0
-  w_z_accel: 1.0
-lr:
-  bg:
-    colors: 0.01
-    means: 0.00016
-    opacities: 0.05
-    quats: 0.001
-    scales: 0.005
-  fg:
-    colors: 0.01
-    means: 0.00016
-    motion_coefs: 0.01
-    opacities: 0.01
-    quats: 0.001
-    scales: 0.005
-  motion_bases:
-    rots: 0.00016
-    transls: 0.00016
-num_bg: 100000
-num_dl_workers: 4
-num_epochs: 500
-num_fg: 40000
-num_motion_bases: 10
-optim:
-  control_every: 100
-  cull_opacity_threshold: 0.1
-  cull_scale_threshold: 0.5
-  cull_screen_threshold: 0.15
-  densify_scale_threshold: 0.01
-  densify_screen_threshold: 0.05
-  densify_xys_grad_threshold: 0.0002
-  max_steps: 5000
-  reset_opacity_every_n_controls: 30
-  stop_control_by_screen_steps: 4000
-  stop_control_steps: 4000
-  stop_densify_steps: 15000
-  warmup_steps: 200
-port: null
-save_videos_every: 50
-validate_every: 50
-vis_debug: false
-work_dir: SOM_output_lcx/Davis_out/swing
diff --git a/som_out/swing/checkpoints/last.ckpt b/som_out/swing/checkpoints/last.ckpt
deleted file mode 100644
index 7a871eec579bb4576ef363dd8cd0d6aaaed1afa7..0000000000000000000000000000000000000000
--- a/som_out/swing/checkpoints/last.ckpt
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0c1d54e757ec8bc4167a9286b3549c46010dd194055e053e883c121549e4e1ca
-size 86551400
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/__init__.py b/som_out/swing/code/2024-10-26-011330/flow3d/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/configs.cpython-310.pyc
deleted file mode 100644
index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/init_utils.cpython-310.pyc
deleted file mode 100644
index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/loss_utils.cpython-310.pyc
deleted file mode 100644
index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/metrics.cpython-310.pyc
deleted file mode 100644
index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/params.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/params.cpython-310.pyc
deleted file mode 100644
index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/scene_model.cpython-310.pyc
deleted file mode 100644
index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc
deleted file mode 100644
index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/trainer.cpython-310.pyc
deleted file mode 100644
index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/transforms.cpython-310.pyc
deleted file mode 100644
index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/validator.cpython-310.pyc
deleted file mode 100644
index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/configs.py b/som_out/swing/code/2024-10-26-011330/flow3d/configs.py
deleted file mode 100644
index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000
--- a/som_out/swing/code/2024-10-26-011330/flow3d/configs.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from dataclasses import dataclass
-
-
-@dataclass
-class FGLRConfig:
-    means: float = 1.6e-4
-    opacities: float = 1e-2
-    scales: float = 5e-3
-    quats: float = 1e-3
-    colors: float = 1e-2
-    motion_coefs: float = 1e-2
-
-
-@dataclass
-class BGLRConfig:
-    means: float = 1.6e-4
-    opacities: float = 5e-2
-    scales: float = 5e-3
-    quats: float = 1e-3
-    colors: float = 1e-2
-
-
-@dataclass
-class MotionLRConfig:
-    rots: float = 1.6e-4
-    transls: float = 1.6e-4
-
-
-@dataclass
-class SceneLRConfig:
-    fg: FGLRConfig
-    bg: BGLRConfig
-    motion_bases: MotionLRConfig
-
-
-@dataclass
-class LossesConfig:
-    w_rgb: float = 1.0
-    w_depth_reg: float = 0.5
-    w_depth_const: float = 0.1
-    w_depth_grad: float = 1
-    w_track: float = 2.0
-    w_mask: float = 1.0
-    w_smooth_bases: float = 0.1
-    w_smooth_tracks: float = 2.0
-    w_scale_var: float = 0.01
-    w_z_accel: float = 1.0
-
-
-@dataclass
-class OptimizerConfig:
-    max_steps: int = 5000
-    ## Adaptive gaussian control
-    warmup_steps: int = 200
-    control_every: int = 100
-    reset_opacity_every_n_controls: int = 30
-    stop_control_by_screen_steps: int = 4000
-    stop_control_steps: int = 4000
-    ### Densify.
-    densify_xys_grad_threshold: float = 0.0002
-    densify_scale_threshold: float = 0.01
-    densify_screen_threshold: float = 0.05
-    stop_densify_steps: int = 15000
-    ### Cull.
-    cull_opacity_threshold: float = 0.1
-    cull_scale_threshold: float = 0.5
-    cull_screen_threshold: float = 0.15
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/data/__init__.py b/som_out/swing/code/2024-10-26-011330/flow3d/data/__init__.py
deleted file mode 100644
index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000
--- a/som_out/swing/code/2024-10-26-011330/flow3d/data/__init__.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from dataclasses import asdict, replace
-
-from torch.utils.data import Dataset
-
-from .base_dataset import BaseDataset
-from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig
-from .iphone_dataset import (
-    iPhoneDataConfig,
-    iPhoneDataConfig_Crafter,
-    iPhoneDataset,
-    iPhoneDatasetKeypointView,
-    iPhoneDatasetVideoView,
-)
-
-
-def get_train_val_datasets(
-    data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter,
-    load_val: bool,
-) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]:
-    train_video_view = None
-    val_img_dataset = None
-    val_kpt_dataset = None
-    if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter):
-        train_dataset = iPhoneDataset(**asdict(data_cfg))
-        train_video_view = iPhoneDatasetVideoView(train_dataset)
-        if load_val:
-            val_img_dataset = (
-                iPhoneDataset(
-                    **asdict(replace(data_cfg, split="val", load_from_cache=True))
-                )
-                if train_dataset.has_validation
-                else None
-            )
-            val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset)
-    elif isinstance(data_cfg, DavisDataConfig) or isinstance(
-        data_cfg, CustomDataConfig
-    ):
-        train_dataset = CasualDataset(**asdict(data_cfg))
-    else:
-        raise ValueError(f"Unknown data config: {data_cfg}")
-    return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/base_dataset.cpython-310.pyc
deleted file mode 100644
index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc
deleted file mode 100644
index 0e12be124c07bc40149b0b8bcafd1413e94f8fd0..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/colmap.cpython-310.pyc
deleted file mode 100644
index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc
deleted file mode 100644
index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/utils.cpython-310.pyc
deleted file mode 100644
index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/data/base_dataset.py b/som_out/swing/code/2024-10-26-011330/flow3d/data/base_dataset.py
deleted file mode 100644
index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000
--- a/som_out/swing/code/2024-10-26-011330/flow3d/data/base_dataset.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from abc import abstractmethod
-
-import torch
-from torch.utils.data import Dataset, default_collate
-
-
-class BaseDataset(Dataset):
-    @property
-    @abstractmethod
-    def num_frames(self) -> int: ...
-
-    @property
-    def keyframe_idcs(self) -> torch.Tensor:
-        return torch.arange(self.num_frames)
-
-    @abstractmethod
-    def get_w2cs(self) -> torch.Tensor: ...
-
-    @abstractmethod
-    def get_Ks(self) -> torch.Tensor: ...
-
-    @abstractmethod
-    def get_image(self, index: int) -> torch.Tensor: ...
-
-    @abstractmethod
-    def get_depth(self, index: int) -> torch.Tensor: ...
-
-    @abstractmethod
-    def get_mask(self, index: int) -> torch.Tensor: ...
-
-    def get_img_wh(self) -> tuple[int, int]: ...
-
-    @abstractmethod
-    def get_tracks_3d(
-        self, num_samples: int, **kwargs
-    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
-        """
-        Returns 3D tracks:
-            coordinates (N, T, 3),
-            visibles (N, T),
-            invisibles (N, T),
-            confidences (N, T),
-            colors (N, 3)
-        """
-        ...
-
-    @abstractmethod
-    def get_bkgd_points(
-        self, num_samples: int, **kwargs
-    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
-        """
-        Returns background points:
-            coordinates (N, 3),
-            normals (N, 3),
-            colors (N, 3)
-        """
-        ...
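-
-    # The keys excluded from default_collate in train_collate_fn below hold
-    # variable-length per-sample tensors, so they are gathered into Python
-    # lists instead of being stacked into a single batch tensor.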
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/data/casual_dataset.py b/som_out/swing/code/2024-10-26-011330/flow3d/data/casual_dataset.py deleted file mode 100644 index e378c6d2408c8ae5d2cdaf45cf6f77dead5c7bd2..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything" #"aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything", #"aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = 
depth_type - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return self.depths[index] / 
self.scale - - def load_image(self, index) -> torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, dtype=torch.float32), - 
torch.arange(0, H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/data/colmap.py b/som_out/swing/code/2024-10-26-011330/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = {v.name: k for k, v in 
images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/data/iphone_dataset.py b/som_out/swing/code/2024-10-26-011330/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.scene_norm_dict = 
scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
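The training branch that follows loads per-frame depth. For the monocular estimators the .npy files hold disparity-like values, so load_depth floors near-zero entries and inverts; reduced to a standalone sketch with a hypothetical array:

import numpy as np

disp = np.random.rand(480, 640).astype(np.float32)  # stands in for a saved disparity map
disp = np.clip(disp, 1e-3, None)  # floor tiny values so the inversion stays bounded
depth = 1.0 / disp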
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
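The normalization block below centers the scene on the mean of the foreground tracks, scales it by half the 5%-95% extent, and rotates the mean camera up-axis onto world +z. The axis-angle construction on its own (the example up vector is arbitrary):

import torch
import torch.nn.functional as F
import roma

original_up = F.normalize(torch.tensor([0.1, -0.9, 0.2]), dim=-1)  # e.g. mean camera up
target_up = torch.tensor([0.0, 0.0, 1.0])
axis = F.normalize(torch.linalg.cross(original_up, target_up), dim=-1)
angle = torch.acos(torch.dot(original_up, target_up))
R = roma.rotvec_to_rotmat(axis * angle)  # R @ original_up lands (numerically) on target_up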
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
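The loop below spreads num_samples roughly evenly across the temporally subsampled frames, assigning whatever remains to the last frame so the total is exact; schematically:

num_samples, num_sampled_frames = 1000, 7
per_frame = num_samples // num_sampled_frames
budget = [per_frame] * (num_sampled_frames - 1)
budget.append(num_samples - (num_sampled_frames - 1) * per_frame)
assert sum(budget) == num_samples  # 142 * 6 + 148 == 1000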
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
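The color lookup that follows, like every sampling call in this file, goes through normalize_coords from flow3d.data.utils, which maps pixel coordinates onto grid_sample's [-1, 1] convention as coords / [W - 1, H - 1] * 2 - 1. A self-contained version with dummy tensors:

import torch
import torch.nn.functional as F

img = torch.rand(1, 3, 240, 320)            # (B, C, H, W)
pix = torch.tensor([[[[100.0, 150.0]]]])    # (1, 1, 1, 2), (x, y) in pixel units
H, W = img.shape[-2:]
grid = pix / torch.tensor([W - 1.0, H - 1.0]) * 2 - 1  # what normalize_coords computes
colors = F.grid_sample(img, grid, align_corners=True)  # (1, 3, 1, 1)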
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
- self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/data/utils.py b/som_out/swing/code/2024-10-26-011330/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. - - Args: - occlusions: [-inf, inf], np.float32 - expected_dist:, [-inf, inf], np.float32 - - Returns: - visibles: bool - """ - - def sigmoid(x): - if x.dtype == np.ndarray: - return 1 / (1 + np.exp(-x)) - else: - return torch.sigmoid(x) - - visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5 - return visibles - - -def parse_tapir_track_info(occlusions, expected_dist): - """ - return: - valid_visible: mask of visible & confident points - valid_invisible: mask of invisible & confident points - confidence: clamped confidence scores (all < 0.5 -> 0) - """ - visiblility = 1 - F.sigmoid(occlusions) - confidence = 1 - F.sigmoid(expected_dist) - valid_visible = visiblility * confidence > 0.5 - valid_invisible = (1 - visiblility) * confidence > 0.5 - # set all confidence < 0.5 to 0 - confidence = confidence * (valid_visible | valid_invisible).float() - return valid_visible, valid_invisible, confidence - - -def get_tracks_3d_for_query_frame( - query_index: int, - query_img: torch.Tensor, - tracks_2d: torch.Tensor, - depths: torch.Tensor, - masks: torch.Tensor, - inv_Ks: torch.Tensor, - c2ws: torch.Tensor, -): - """ - :param query_index (int) - :param query_img [H, W, 3] - :param tracks_2d [N, T, 4] - :param depths [T, H, W] - :param masks [T, H, W] - :param inv_Ks [T, 3, 3] - :param c2ws [T, 4, 4] - returns ( - tracks_3d [N, T, 3] - track_colors [N, 3] - visibles [N, T] - invisibles [N, T] - confidences [N, T] - ) - """ - T, H, W = depths.shape - query_img = query_img[None].permute(0, 3, 1, 2) # (1, 3, H, W) - tracks_2d = tracks_2d.swapaxes(0, 1) # (T, N, 4) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - # (T, N), (T, N), (T, N) - visibles, 
invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - track_depths = F.grid_sample( - depths[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths[..., None] - ) - tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[ - ..., :3 - ] - # Filter out out-of-mask tracks. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - is_in_masks = ( - F.grid_sample( - masks[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - )[:, 0, 0] - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - - # valid if in the fg mask at least 40% of the time - # in_mask_counts = is_in_masks.sum(0) - # t = 0.25 - # thresh = min(t * T, in_mask_counts.float().quantile(t).item()) - # valid = in_mask_counts > thresh - valid = is_in_masks[query_index] - # valid if visible 5% of the time - visible_counts = visibles.sum(0) - valid = valid & ( - visible_counts - >= min( - int(0.05 * T), - visible_counts.float().quantile(0.1).item(), - ) - ) - - # Get track's color from the query frame. - # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. 
Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. - If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader. 
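For orientation, a minimal usage sketch of the depth-to-normal helpers that follow (hypothetical camera values; relies on normal_from_depth_image as defined below, and the identity extrinsic stands in for an arbitrary world-to-camera matrix):

import torch

H, W = 480, 640
depth = torch.rand(H, W) + 1.0  # hypothetical metric depth map, (H, W)
K = torch.tensor([[500.0, 0.0, W / 2], [0.0, 500.0, H / 2], [0.0, 0.0, 1.0]])
w2c = torch.eye(4)  # world-to-camera; the helper inverts it internally
normals = normal_from_depth_image(depth, K, w2c)  # (H, W, 3) unit normals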
-def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] - ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/init_utils.py b/som_out/swing/code/2024-10-26-011330/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from 
flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes -from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - -def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the canonical frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. - opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - -def init_bg( - points: StaticObservations, -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkdg_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors for each point and use that as the scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkdg_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals. 
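The lines that follow rotate the local +z axis onto each point normal (axis z x n, angle arccos(z . n)), encode that as a rotation vector, and convert it to a unit quaternion rolled from xyzw to wxyz. A standalone sketch of the same construction, assuming random non-degenerate unit normals (n not close to -z):

import torch
import torch.nn.functional as F
import roma

n = F.normalize(torch.randn(8, 3), dim=-1)        # hypothetical unit normals
z = torch.tensor([[0.0, 0.0, 1.0]]).expand_as(n)  # local +z axis
axis = F.normalize(z.cross(n, dim=-1), dim=-1)    # rotation axis
angle = (z * n).sum(-1, keepdim=True).acos()      # rotation angle
q_xyzw = roma.rotvec_to_unitquat(axis * angle)
R = roma.unitquat_to_rotmat(q_xyzw)
# R should take +z onto n (up to float precision; degenerate when n is near -z).
assert torch.allclose(torch.einsum("nij,nj->ni", R, z), n, atol=1e-4)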
- local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t - 
procrustes_weights = ( - weights[cano_t] - * weights[cur_t] - * (confidences[cano_t] + confidences[cur_t]) - / 2 - ) - if procrustes_weights.sum() < min_mean_weight * num_frames: - init_rots[n, cur_t] = init_rots[n, prev_t] - init_ts[n, cur_t] = init_ts[n, prev_t] - cluster_skip_ts.append(cur_t) - else: - se3, (err, err_before) = solve_procrustes( - cluster[cano_t], - cluster[cur_t], - weights=procrustes_weights, - enforce_se3=True, - rot_type=rot_type, - ) - init_rot, init_t, _ = se3 - assert init_rot.shape[-1] == rot_dim - # double cover - if rot_type == "quat" and torch.linalg.norm( - init_rot - init_rots[n][prev_t] - ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]): - init_rot = -init_rot - init_rots[n, cur_t] = init_rot - init_ts[n, cur_t] = init_t - if np.isnan(err): - print(f"{cur_t=} {err=}") - print(f"{procrustes_weights.isnan().sum()=}") - if np.isnan(err_before): - print(f"{cur_t=} {err_before=}") - print(f"{procrustes_weights.isnan().sum()=}") - errs_after[n, cur_t] = err - errs_before[n, cur_t] = err_before - prev_t = cur_t - skipped_ts[cluster_id.item()] = cluster_skip_ts - - guru.info(f"{skipped_ts=}") - guru.info( - "procrustes init median error: {:.5f} => {:.5f}".format( - np.median(errs_before[errs_before > 0]), - np.median(errs_after[errs_after > 0]), - ) - ) - guru.info( - "procrustes init mean error: {:.5f} => {:.5f}".format( - np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0]) - ) - ) - guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}") - - if vis: - server = get_server(port) - center_idcs = torch.argmin(dists2centers, dim=0) - print(f"{dists2centers.shape=} {center_idcs.shape=}") - vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs]) - vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks") - import ipdb - - ipdb.set_trace() - - bases = MotionBases(init_rots, init_ts) - return bases, motion_coefs, tracks_3d - - -def run_initial_optim( - fg: GaussianParams, - bases: MotionBases, - tracks_3d: TrackObservations, - Ks: torch.Tensor, - w2cs: torch.Tensor, - num_iters: int = 1000, - use_depth_range_loss: bool = False, -): - """ - :param fg: foreground gaussians, providing means and motion coefficients - :param bases: motion bases with rots [num_bases, num_frames, 4|6] and transls [num_bases, num_frames, 3] - :param tracks_3d: observed 3D tracks used as supervision - """ - optimizer = torch.optim.Adam( - [ - {"params": bases.params["rots"], "lr": 1e-2}, - {"params": bases.params["transls"], "lr": 3e-2}, - {"params": fg.params["motion_coefs"], "lr": 1e-2}, - {"params": fg.params["means"], "lr": 1e-3}, - ], - ) - scheduler = torch.optim.lr_scheduler.ExponentialLR( - optimizer, gamma=0.1 ** (1 / num_iters) - ) - G = fg.params["means"].shape[0] - num_frames = bases.num_frames - device = bases.params["rots"].device - - w_smooth_func = lambda i, min_v, max_v, th: ( - min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v - ) - - gt_2d, gt_depth = project_2d_tracks( - tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - # (G, T, 2) - gt_2d = gt_2d.swapaxes(0, 1) - # (G, T) - gt_depth = gt_depth.swapaxes(0, 1) - - ts = torch.arange(0, num_frames, device=device) - ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1)) # (3B,) - - pbar = tqdm(range(0, num_iters)) - for i in pbar: - coefs = fg.get_coefs() - transfms = bases.compute_transforms(ts, coefs) - positions = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 
1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. - w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = 
init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / 
(cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/loss_utils.py b/som_out/swing/code/2024-10-26-011330/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return 
torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - 
# acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/metrics.py b/som_out/swing/code/2024-10-26-011330/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/metrics.py +++ /dev/null @@ -1,313 +0,0 @@ -from typing import Literal - -import numpy as np -import torch -import torch.nn.functional as F -from torchmetrics.functional.image.lpips import _NoTrainLpips -from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure -from torchmetrics.metric import Metric -from torchmetrics.utilities import dim_zero_cat -from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE - - -def compute_psnr( - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, -) -> float: - """ - Args: - preds (torch.Tensor): (..., 3) predicted images in [0, 1]. - targets (torch.Tensor): (..., 3) target images in [0, 1]. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - - Returns: - psnr (float): Peak signal-to-noise ratio. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - return ( - -10.0 - * torch.log( - F.mse_loss( - preds * masks[..., None], - targets * masks[..., None], - reduction="sum", - ) - / masks.sum().clamp(min=1.0) - / 3.0 - ) - / np.log(10.0) - ).item() - - -def compute_pose_errors( - preds: torch.Tensor, targets: torch.Tensor -) -> tuple[float, float, float]: - """ - Args: - preds: (N, 4, 4) predicted camera poses. - targets: (N, 4, 4) target camera poses. - - Returns: - ate (float): Absolute trajectory error. - rpe_t (float): Relative pose error in translation. - rpe_r (float): Relative pose error in rotation (degree). - """ - # Compute ATE. - ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item() - # Compute RPE_t and RPE_r. - # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r. - # torch has numerical issues for acos when the value is close to 1.0, i.e. - # RPE_r is supposed to be very small, and will result in artificially large - # error. 
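As a toy sanity check of the relative-pose-error recipe used next, assuming two identical trajectories so both RPE terms must come out exactly zero:

import numpy as np

poses_pred = np.stack([np.eye(4)] * 5)  # hypothetical 5-frame trajectory
poses_gt = poses_pred.copy()
pred_rels = np.linalg.inv(poses_pred[:-1]) @ poses_pred[1:]
target_rels = np.linalg.inv(poses_gt[:-1]) @ poses_gt[1:]
error_rels = np.linalg.inv(target_rels) @ pred_rels  # identity matrices here
traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1)  # -> 3.0 each
rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean()  # -> 0.0
rpe_r = np.degrees(np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0))).mean()  # -> 0.0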
- preds = preds.detach().cpu().numpy() - targets = targets.detach().cpu().numpy() - pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:] - target_rels = np.linalg.inv(targets[:-1]) @ targets[1:] - error_rels = np.linalg.inv(target_rels) @ pred_rels - traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1) - rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item() - rpe_r = ( - np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item() - / np.pi - * 180.0 - ) - return ate, rpe_t, rpe_r - - -class mPSNR(PeakSignalNoiseRatio): - sum_squared_error: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__(self, **kwargs) -> None: - super().__init__( - data_range=1.0, - base=10.0, - dim=None, - reduction="elementwise_mean", - **kwargs, - ) - self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (..., 3) float32 predicted images. - targets (torch.Tensor): (..., 3) float32 target images. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - self.sum_squared_error.append( - torch.sum(torch.pow((preds - targets) * masks[..., None], 2)) - ) - self.total.append(masks.sum().to(torch.int64) * 3) - - def compute(self) -> torch.Tensor: - """Compute peak signal-to-noise ratio over state.""" - sum_squared_error = dim_zero_cat(self.sum_squared_error) - total = dim_zero_cat(self.total) - return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0) - - -class mSSIM(StructuralSimilarityIndexMeasure): - similarity: list - - def __init__(self, **kwargs) -> None: - super().__init__( - reduction=None, - data_range=1.0, - return_full_image=False, - **kwargs, - ) - assert isinstance(self.sigma, float) - - def __len__(self) -> int: - return sum([s.shape[0] for s in self.similarity]) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional binary masks where - the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - - # Construct a 1D Gaussian blur filter. - assert isinstance(self.kernel_size, int) - hw = self.kernel_size // 2 - shift = (2 * hw - self.kernel_size + 1) / 2 - assert isinstance(self.sigma, float) - f_i = ( - (torch.arange(self.kernel_size, device=preds.device) - hw + shift) - / self.sigma - ) ** 2 - filt = torch.exp(-0.5 * f_i) - filt /= torch.sum(filt) - - # Blur in x and y (faster than the 2D convolution). - def convolve2d(z, m, f): - # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf). 
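convolve2d below is a masked (normalized) filter: it blurs z * m and m separately, then renormalizes wherever the mask has support. The same trick in minimal standalone form, assuming a box kernel and random data:

import torch
import torch.nn.functional as F

z = torch.rand(1, 1, 8, 8)                   # signal
m = (torch.rand(1, 1, 8, 8) > 0.3).float()   # 1 = valid pixel
k = torch.ones(1, 1, 3, 3)
num = F.conv2d(z * m, k, padding=1)          # windowed sum of valid values
den = F.conv2d(m, k, padding=1)              # windowed count of valid pixels
masked_avg = torch.where(den > 0, num / den, torch.zeros_like(num))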
- z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. - # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. 
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. - """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/params.py b/som_out/swing/code/2024-10-26-011330/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if 
f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] 
== 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/renderer.py b/som_out/swing/code/2024-10-26-011330/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. - work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/scene_model.py b/som_out/swing/code/2024-10-26-011330/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def 
__init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = 
GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). - target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = 
colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. - self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/tensor_dataclass.py b/som_out/swing/code/2024-10-26-011330/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/trainer.py b/som_out/swing/code/2024-10-26-011330/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
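# Illustrative use of the TensorDataclass machinery defined above: `map`
# applies a function to every tensor field, which gives indexing, device
# moves, and mask filtering for free. Shapes are made up; assumes
# TrackObservations is in scope.
import torch

tracks = TrackObservations(
    xyz=torch.randn(100, 8, 3),
    visibles=torch.rand(100, 8) > 0.5,
    invisibles=torch.rand(100, 8) > 0.5,
    confidences=torch.rand(100, 8),
    colors=torch.rand(100, 3),
)
assert tracks.check_sizes()
subset = tracks[:10]  # __getitem__ -> map(lambda x: x[:10])
confident = tracks.filter_valid(tracks.confidences.mean(dim=-1) > 0.5)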
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
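# The intrinsics construction used by `render_fn` above, in isolation: the
# pixel focal length follows from tan(fov / 2) = (H / 2) / focal for a
# vertical field of view. Image size and FOV values are illustrative.
import numpy as np

W, H = 640, 480
fov = np.deg2rad(60.0)
focal = 0.5 * H / np.tan(0.5 * fov)
K = np.array(
    [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]]
)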
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
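# The photometric term above, stand-alone: a 0.8 / 0.2 blend of L1 and DSSIM
# (1 - SSIM), with (B, H, W, 3) images permuted to channel-first for
# pytorch_msssim. Tensors here are random stand-ins.
import torch
import torch.nn.functional as F
from pytorch_msssim import SSIM

ssim = SSIM(data_range=1.0, size_average=True, channel=3)
pred = torch.rand(2, 64, 64, 3)
gt = torch.rand(2, 64, 64, 3)
rgb_loss = 0.8 * F.l1_loss(pred, gt) + 0.2 * (
    1 - ssim(pred.permute(0, 3, 1, 2), gt.permute(0, 3, 1, 2))
)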
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
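# A sketch of the quantile-trimmed masked L1 used for the track and depth
# terms above. The real `masked_l1_loss` lives in flow3d.loss_utils and may
# normalize differently; this only illustrates the trimming idea: errors
# beyond the given quantile are treated as outliers and dropped.
import torch

def masked_l1_sketch(pred, gt, mask=None, quantile=0.98):
    err = (pred - gt).abs()
    if mask is not None:
        err = err * mask
    thresh = torch.quantile(err.reshape(-1), quantile)
    return err[err <= thresh].mean()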
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
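# The per-Gaussian statistics behind adaptive density control, in isolation:
# screen-space gradient norms and visibility counts are accumulated over
# rendered views with index_add_, then averaged to pick Gaussians for
# densification. Shapes and counts are illustrative.
import torch

G = 1000
xys_grad_norm_acc = torch.zeros(G)
vis_count = torch.zeros(G, dtype=torch.int64)

radii = torch.randint(0, 5, (2, G))  # (C, G) per-view screen radii
xys_grad = torch.randn(2, G, 2)      # (C, G, 2) screen-space position grads
sel = radii > 0                      # only count Gaussians that were rendered
gidcs = torch.where(sel)[1]          # Gaussian index of each visible splat
xys_grad_norm_acc.index_add_(0, gidcs, xys_grad[sel].norm(dim=-1))
vis_count.index_add_(0, gidcs, torch.ones_like(gidcs))
xys_grad_avg = xys_grad_norm_acc / vis_count.clamp_min(1)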
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/trajectories.py b/som_out/swing/code/2024-10-26-011330/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1) - y = torch.cross(z, x, dim=-1) # (3) - avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center) - avg_w2c = torch.linalg.inv(avg_c2w) - return avg_w2c - - -def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor: - """Triangulate a set of rays to find a single lookat point. - - Args: - origins (torch.Tensor): A (N, 3) array of ray origins. - viewdirs (torch.Tensor): A (N, 3) array of ray view directions. - - Returns: - torch.Tensor: A (3,) lookat point. - """ - - viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1) - eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None] - # Calculate projection matrix I - rr^T - I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :]) - # Compute sum of projections - sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3) - # Solve for the intersection point using least squares - lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0] - # Check NaNs. - assert not torch.any(torch.isnan(lookat)) - return lookat - - -def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor): - """ - Args: - positions: (N, 3) tensor of camera positions - lookat: (3,) tensor of lookat point - up: (3,) tensor of up vector - - Returns: - w2cs: (N, 3, 3) tensor of world to camera rotation matrices - """ - forward_vectors = F.normalize(lookat - positions, dim=-1) - right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1) - down_vectors = F.normalize( - torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1 - ) - Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1) - w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions)) - return w2cs - - -def get_arc_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_position = torch.linalg.inv(ref_w2c)[:3, 3] - thetas = ( - torch.sin( - torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[ - :-1 - ] - ) - * (degree / 2.0) - / 180.0 - * torch.pi - ) - positions = torch.einsum( - "nij,j->ni", - roma.rotvec_to_rotmat(thetas[:, None] * up[None]), - ref_position - lookat, - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_lemniscate_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi) - # Lemniscate curve in camera space. Starting at the origin. - thetas = ( - torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1] - + torch.pi / 2 - ) - positions = torch.stack( - [ - a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2), - a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2), - torch.zeros(num_frames, device=ref_w2c.device), - ], - dim=-1, - ) - # Transform to world space. 
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/transforms.py b/som_out/swing/code/2024-10-26-011330/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transfm to be SE3. - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - q (torch.Tensor): (4,), rotation component in quaternion of WXYZ - format. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (torch.Tensor): (), average L2 distance after alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error. 
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/validator.py b/som_out/swing/code/2024-10-26-011330/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
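# Exercising `solve_procrustes` above on a synthetic Sim(3): build dst from
# src with a known rotation, translation, and scale, then recover them. The
# unpacking follows the function's documented return values; the recovered
# rotation is a WXYZ quaternion by default.
import math
import torch

theta = 0.3
R = torch.tensor(
    [
        [math.cos(theta), -math.sin(theta), 0.0],
        [math.sin(theta), math.cos(theta), 0.0],
        [0.0, 0.0, 1.0],
    ]
)
t = torch.tensor([1.0, -2.0, 0.5])
s = 2.0
src = torch.randn(500, 3)
dst = s * (src @ R.T + t)  # matches the objective in the docstring
(quat, t_hat, s_hat), (error, error_before) = solve_procrustes(src, dst)
# error should be ~0 for a noise-free Sim(3) pair.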
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
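# Sub-pixel sampling of a rendered map at keypoint locations, as done above:
# pixel coordinates are normalized to [-1, 1] (the align_corners=True
# convention) and looked up with F.grid_sample. `normalize_coords` is a
# flow3d helper; the version here is an assumed equivalent.
import torch
import torch.nn.functional as F

def normalize_coords_sketch(pts, h, w):
    # (P, 2) pixel coordinates -> (P, 2) in [-1, 1].
    return torch.stack(
        [2 * pts[..., 0] / (w - 1) - 1, 2 * pts[..., 1] / (h - 1) - 1], dim=-1
    )

H, W = 480, 640
track_map = torch.randn(H, W, 2)  # e.g. a rendered 2D track map
pts = torch.tensor([[100.5, 200.25], [10.0, 20.0]])
vals = F.grid_sample(
    track_map[None].permute(0, 3, 1, 2),             # (1, 2, H, W)
    normalize_coords_sketch(pts, H, W)[None, None],  # (1, 1, P, 2)
    align_corners=True,
).permute(0, 2, 3, 1)[0, 0]                          # (P, 2)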
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
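# The PCK update above counts a keypoint as correct when its predicted 2D
# location lands within 5% of the longer image side of the ground truth; a
# minimal stand-alone version of that metric (flow3d's PCK class may differ):
import torch

def pck(pred: torch.Tensor, gt: torch.Tensor, threshold: float) -> torch.Tensor:
    # pred, gt: (P, 2) keypoints in pixels; threshold in pixels.
    dist = torch.linalg.norm(pred - gt, dim=-1)
    return (dist < threshold).float().mean()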
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
- t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (3, 3). - img = batch["imgs"][0] - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "motion_coefs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/vis/__init__.py b/som_out/swing/code/2024-10-26-011330/flow3d/vis/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/vis/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc deleted file mode 100644 index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/vis/__pycache__/render_panel.cpython-310.pyc deleted file mode 100644 index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/vis/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/swing/code/2024-10-26-011330/flow3d/vis/__pycache__/viewer.cpython-310.pyc deleted file mode 100644 index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-011330/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/vis/playback_panel.py b/som_out/swing/code/2024-10-26-011330/flow3d/vis/playback_panel.py deleted file mode 100644 index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/vis/playback_panel.py +++ /dev/null @@ -1,68 +0,0 @@ -import threading -import time - -import viser - - -def add_gui_playback_group( - server: viser.ViserServer, - num_frames: int, - min_fps: float = 1.0, - max_fps: float = 60.0, - fps_step: float = 0.1, - initial_fps: 
float = 10.0, -): - gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/vis/render_panel.py b/som_out/swing/code/2024-10-26-011330/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
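# Hypothetical wiring of `add_gui_playback_group` above into a viewer: react
# to timestep changes through the returned slider handle. Only viser calls
# that already appear in this code (ViserServer, on_update) are assumed.
import viser

server = viser.ViserServer(port=8080)
gui_timestep, *_ = add_gui_playback_group(server, num_frames=80)

@gui_timestep.on_update
def _(_) -> None:
    # Re-render or swap the displayed frame here.
    print(f"show frame {gui_timestep.value}")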
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
- Returns current camera pose + FOV if available.""" - - if preview_frame_slider is None: - return - maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad( - preview_frame_slider.value / get_max_frame_index() - ) - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - render_tab_state.preview_fov = fov_rad - render_tab_state.preview_aspect = camera_path.get_aspect() - render_tab_state.preview_camera_type = camera_type.value - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - return pose, fov_rad, time - - def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]: - """Helper for creating the current frame # slider. This is removed and - re-added anytime the `max` value changes.""" - - with playback_folder: - preview_frame_slider = server.gui.add_slider( - "Preview frame", - min=0, - max=get_max_frame_index(), - step=1, - initial_value=0, - # Place right after the pause button. - order=preview_render_stop_button.order + 0.01, - disabled=get_max_frame_index() == 1, - ) - play_button.disabled = preview_frame_slider.disabled - preview_render_button.disabled = preview_frame_slider.disabled - - @preview_frame_slider.on_update - def _(_) -> None: - nonlocal preview_camera_handle - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - return - pose, fov_rad, time = maybe_pose_and_fov_rad_and_time - - preview_camera_handle = server.scene.add_camera_frustum( - "/preview_camera", - fov=fov_rad, - aspect=resolution.value[0] / resolution.value[1], - scale=0.35, - wxyz=pose.rotation().wxyz, - position=pose.translation(), - color=(10, 200, 30), - ) - if render_tab_state.preview_render: - for client in server.get_clients().values(): - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - return preview_frame_slider - - # We back up the camera poses before we start previewing renders, and restore them afterwards. - camera_pose_backup_from_id: Dict[int, tuple] = {} - - @preview_render_button.on_click - def _(_) -> None: - render_tab_state.preview_render = True - preview_render_button.visible = False - preview_render_stop_button.visible = True - - maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state() - if maybe_pose_and_fov_rad_and_time is None: - remove_preview_camera() - return - pose, fov, time = maybe_pose_and_fov_rad_and_time - del fov - - # Hide all scene nodes when we're previewing the render. - server.scene.set_global_visibility(False) - - # Back up and then set camera poses. - for client in server.get_clients().values(): - camera_pose_backup_from_id[client.client_id] = ( - client.camera.position, - client.camera.look_at, - client.camera.up_direction, - ) - client.camera.wxyz = pose.rotation().wxyz - client.camera.position = pose.translation() - if gui_timestep_handle is not None: - gui_timestep_handle.value = int(time) - - @preview_render_stop_button.on_click - def _(_) -> None: - render_tab_state.preview_render = False - preview_render_button.visible = True - preview_render_stop_button.visible = False - - # Revert camera poses.
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera() # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play, daemon=True).start() - - # Pause the camera trajectory when the pause button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # Add a button for loading an existing camera path. - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - camera_path_dir = datapath - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # Load the selected json file; paths are saved into `datapath` by the save button below. - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # apply the x rotation by 180 deg - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_fov values will not match exactly.
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # World-to-camera extrinsics for this frame. - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/vis/utils.py b/som_out/swing/code/2024-10-26-011330/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr  # Optional dependency, left disabled; required only by draw_tracks_2d_th below. -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occs (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, occ,
radius=radius) - out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
- verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs) - faces_int32 = faces.to(torch.int32) - - rast, _ = cast( - tuple, - dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])), - ) - rgba = cast( - torch.Tensor, - dr.interpolate( - torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(), - rast, - faces_int32, - ), - )[0] - rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[ - 0 - ].clamp(0, 1) - # Compose. - color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img - - # Unpad. - color = color[:H, :W] - - return (color.cpu().numpy() * 255.0).astype(np.uint8) - - -def make_video_divisble( - video: torch.Tensor | np.ndarray, block_size=16 -) -> torch.Tensor | np.ndarray: - H, W = video.shape[1:3] - H_new = H - H % block_size - W_new = W - W % block_size - return video[:, :H_new, :W_new] - - -def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor: - """Convert single channel to a color img. - - Args: - img (torch.Tensor): (..., 1) float32 single channel image. - colormap (str): Colormap for img. - - Returns: - (..., 3) colored img with colors in [0, 1]. - """ - img = torch.nan_to_num(img, 0) - if colormap == "gray": - return img.repeat(1, 1, 3) - img_long = (img * 255).long() - img_long_min = torch.min(img_long) - img_long_max = torch.max(img_long) - assert img_long_min >= 0, f"the min value is {img_long_min}" - assert img_long_max <= 255, f"the max value is {img_long_max}" - return torch.tensor( - colormaps[colormap].colors, # type: ignore - device=img.device, - )[img_long[..., 0]] - - -def apply_depth_colormap( - depth: torch.Tensor, - acc: torch.Tensor | None = None, - near_plane: float | None = None, - far_plane: float | None = None, -) -> torch.Tensor: - """Converts a depth image to color for easier analysis. - - Args: - depth (torch.Tensor): (..., 1) float32 depth. - acc (torch.Tensor | None): (..., 1) optional accumulation mask. - near_plane: Closest depth to consider. If None, use min image value. - far_plane: Furthest depth to consider. If None, use max image value. - - Returns: - (..., 3) colored depth image with colors in [0, 1]. 
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/swing/code/2024-10-26-011330/flow3d/vis/viewer.py b/som_out/swing/code/2024-10-26-011330/flow3d/vis/viewer.py deleted file mode 100644 index 
3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/flow3d/vis/viewer.py +++ /dev/null @@ -1,69 +0,0 @@ -from pathlib import Path -from typing import Callable, Literal, Optional, Tuple, Union - -import numpy as np -from jaxtyping import Float32, UInt8 -from nerfview import CameraState, Viewer -from viser import Icon, ViserServer - -from flow3d.vis.playback_panel import add_gui_playback_group -from flow3d.vis.render_panel import populate_render_tab - - -class DynamicViewer(Viewer): - def __init__( - self, - server: ViserServer, - render_fn: Callable[ - [CameraState, Tuple[int, int]], - Union[ - UInt8[np.ndarray, "H W 3"], - Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]], - ], - ], - num_frames: int, - work_dir: str, - mode: Literal["rendering", "training"] = "rendering", - ): - self.num_frames = num_frames - self.work_dir = Path(work_dir) - super().__init__(server, render_fn, mode) - - def _define_guis(self): - super()._define_guis() - server = self.server - self._time_folder = server.gui.add_folder("Time") - with self._time_folder: - self._playback_guis = add_gui_playback_group( - server, - num_frames=self.num_frames, - initial_fps=15.0, - ) - self._playback_guis[0].on_update(self.rerender) - self._canonical_checkbox = server.gui.add_checkbox("Canonical", False) - self._canonical_checkbox.on_update(self.rerender) - - _cached_playback_disabled = [] - - def _toggle_gui_playing(event): - if event.target.value: - nonlocal _cached_playback_disabled - _cached_playback_disabled = [ - gui.disabled for gui in self._playback_guis - ] - target_disabled = [True] * len(self._playback_guis) - else: - target_disabled = _cached_playback_disabled - for gui, disabled in zip(self._playback_guis, target_disabled): - gui.disabled = disabled - - self._canonical_checkbox.on_update(_toggle_gui_playing) - - self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False) - self._render_track_checkbox.on_update(self.rerender) - - tabs = server.gui.add_tab_group() - with tabs.add_tab("Render", Icon.CAMERA): - self.render_tab_state = populate_render_tab( - server, Path(self.work_dir) / "camera_paths", self._playback_guis[0] - ) diff --git a/som_out/swing/code/2024-10-26-011330/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/swing/code/2024-10-26-011330/scripts/batch_eval_ours_iphone_gcp.sh deleted file mode 100644 index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/scripts/batch_eval_ours_iphone_gcp.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -EXPNAME=$1 - -seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy") -out_dir="/mnt/out/$EXPNAME" -for seq_name in "${seq_names[@]}"; do - seq_dir="$out_dir/$seq_name" - mkdir -p $seq_dir - gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir - done - -python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME \ No newline at end of file diff --git a/som_out/swing/code/2024-10-26-011330/scripts/evaluate_iphone.py b/som_out/swing/code/2024-10-26-011330/scripts/evaluate_iphone.py deleted file mode 100644 index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-011330/scripts/evaluate_iphone.py +++ /dev/null 
@@ -1,447 +0,0 @@ -import argparse -import json -import os.path as osp -from glob import glob -from itertools import product - -import cv2 -import imageio.v3 as iio -import numpy as np -import roma -import torch -from tqdm import tqdm - -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.metrics import mLPIPS, mPSNR, mSSIM -from flow3d.transforms import rt_to_mat4, solve_procrustes - -parser = argparse.ArgumentParser() -parser.add_argument( - "--data_dir", - type=str, - help="Path to the data directory that contains all the sequences.", -) -parser.add_argument( - "--result_dir", - type=str, - help="Path to the result directory that contains the results. " - "For batch evaluation, result_dir should contain subdirectories for each sequence (result_dir/seq_name/results); " - "for single-sequence evaluation, result_dir should contain the results directly (result_dir/results).", -) -parser.add_argument( - "--seq_names", - type=str, - nargs="+", - default=[ - "apple", - "backpack", - "block", - "creeper", - "handwavy", - "haru-sit", - "mochi-high-five", - "paper-windmill", - "pillow", - "spin", - "sriracha-tree", - "teddy", - ], - help="Sequence names to evaluate.", -) -args = parser.parse_args() - - -def load_data_dict(data_dir, train_names, val_names): - val_imgs = np.array( - [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names] - ) - val_covisibles = np.array( - [ - iio.imread( - osp.join( - data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png" - ) - ) - for name in tqdm(val_names, desc="Loading val covisibles") - ] - ) - train_depths = np.array( - [ - np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0] - for name in train_names - ] - ) - train_Ks, train_w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [name + ".png" for name in train_names], - ) - train_Ks = train_Ks[:, :3, :3] - scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item() - train_c2ws = np.linalg.inv(train_w2cs) - train_c2ws[:, :3, -1] *= scale - train_w2cs = np.linalg.inv(train_c2ws) - keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json"))) - keypoints_2d = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints_2d.append(json.load(f)) - keypoints_2d = np.array(keypoints_2d) - keypoints_2d[..., :2] *= 2.0 - time_ids = np.array( - [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths] - ) - time_pairs = np.array(list(product(time_ids, repeat=2))) - index_pairs = np.array(list(product(range(len(time_ids)), repeat=2))) - keypoints_3d = [] - for i, kps_2d in zip(time_ids, keypoints_2d): - K = train_Ks[i] - w2c = train_w2cs[i] - depth = train_depths[i] - is_kp_visible = kps_2d[:, 2] == 1 - is_depth_valid = ( - cv2.remap( - (depth != 0).astype(np.float32), - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - == 1 - ) - kp_depths = cv2.remap( - depth, # type: ignore - kps_2d[None, :, :2].astype(np.float32), - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - kps_3d = ( - np.einsum( - "ij,pj->pi", - np.linalg.inv(K), - np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1), - ) - * kp_depths[:, None] - ) - kps_3d = np.einsum( - "ij,pj->pi", - np.linalg.inv(w2c)[:3], - np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1), - ) - kps_3d = np.concatenate( - [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1 - ) -
kps_3d[kps_3d[:, -1] != 1] = 0.0 - keypoints_3d.append(kps_3d) - keypoints_3d = np.array(keypoints_3d) - return { - "val_imgs": val_imgs, - "val_covisibles": val_covisibles, - "train_depths": train_depths, - "train_Ks": train_Ks, - "train_w2cs": train_w2cs, - "keypoints_2d": keypoints_2d, - "keypoints_3d": keypoints_3d, - "time_ids": time_ids, - "time_pairs": time_pairs, - "index_pairs": index_pairs, - } - - -def load_result_dict(result_dir, val_names): - try: - pred_val_imgs = np.array( - [ - iio.imread(osp.join(result_dir, "rgb", f"{name}.png")) - for name in val_names - ] - ) - except Exception: - pred_val_imgs = None - try: - keypoints_dict = np.load( - osp.join(result_dir, "keypoints.npz"), allow_pickle=True - ) - if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict: - keypoints_dict = keypoints_dict["arr_0"].item() - pred_keypoint_Ks = keypoints_dict["Ks"] - pred_keypoint_w2cs = keypoints_dict["w2cs"] - pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"] - pred_train_depths = keypoints_dict["pred_train_depths"] - except Exception: - print( - "No keypoints.npz found; make sure this is because the method itself cannot produce keypoints." - ) - keypoints_dict = {} - pred_keypoint_Ks = None - pred_keypoint_w2cs = None - pred_keypoints_3d = None - pred_train_depths = None - - if "visibilities" in list(keypoints_dict.keys()): - pred_visibilities = keypoints_dict["visibilities"] - else: - pred_visibilities = None - - return { - "pred_val_imgs": pred_val_imgs, - "pred_train_depths": pred_train_depths, - "pred_keypoint_Ks": pred_keypoint_Ks, - "pred_keypoint_w2cs": pred_keypoint_w2cs, - "pred_keypoints_3d": pred_keypoints_3d, - "pred_visibilities": pred_visibilities, - } - - -def evaluate_3d_tracking(data_dict, result_dict): - train_Ks = data_dict["train_Ks"] - train_w2cs = data_dict["train_w2cs"] - keypoints_3d = data_dict["keypoints_3d"] - time_ids = data_dict["time_ids"] - time_pairs = data_dict["time_pairs"] - index_pairs = data_dict["index_pairs"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"] - if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks): - print("Inconsistent camera intrinsics.") - print(train_Ks[time_ids][0], pred_keypoint_Ks[0]) - keypoint_w2cs = train_w2cs[time_ids] - q, t, s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0] - R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1)) - pred_keypoints_3d = np.einsum( - "ij,...j->...i", - rt_to_mat4(R, t, s).numpy().astype(np.float64), - np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1), - ) - pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:] - # Compute 3D tracking metrics.
- pair_keypoints_3d = keypoints_3d[index_pairs] - is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1) - target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3] - epes = [] - for i in range(len(time_pairs)): - epes.append( - np.linalg.norm( - target_keypoints_3d[i][is_covisible[i]] - - pred_keypoints_3d[i][is_covisible[i]], - axis=-1, - ) - ) - epe = np.mean( - [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_10cm = np.mean( - [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - pck_3d_5cm = np.mean( - [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0] - ).item() - print(f"3D tracking EPE: {epe:.4f}") - print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}") - print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}") - print("-----------------------------") - return epe, pck_3d_10cm, pck_3d_5cm - - -def project(Ks, w2cs, pts): - """ - Args: - Ks: (N, 3, 3) camera intrinsics. - w2cs: (N, 4, 4) camera extrinsics. - pts: (N, N, M, 3) 3D points. - """ - N = Ks.shape[0] - pts = pts.swapaxes(0, 1).reshape(N, -1, 3) - - pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1) - - # Apply world-to-camera transformation - pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes( - 1, 2 - ) - # Project to image plane using intrinsic parameters - projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2) - - depths = projected_pts[..., 2:3] - # Normalize homogeneous coordinates - projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None) - projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1) - depths = depths.reshape(N, N, -1).swapaxes(0, 1) - return projected_pts, depths - - -def evaluate_2d_tracking(data_dict, result_dict): - train_w2cs = data_dict["train_w2cs"] - keypoints_2d = data_dict["keypoints_2d"] - visibilities = keypoints_2d[..., -1].astype(np.bool_) - time_ids = data_dict["time_ids"] - num_frames = len(time_ids) - num_pts = keypoints_2d.shape[1] - pred_train_depths = result_dict["pred_train_depths"] - pred_keypoint_Ks = result_dict["pred_keypoint_Ks"] - pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"] - pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape( - num_frames, -1, num_pts, 3 - ) - keypoint_w2cs = train_w2cs[time_ids] - s = solve_procrustes( - torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to( - torch.float32 - ), - torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32), - )[0][-1].item() - - target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2] - target_visibilities = visibilities[None].repeat(num_frames, axis=0) - - pred_points, pred_depths = project( - pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d - ) - if result_dict["pred_visibilities"] is not None: - pred_visibilities = result_dict["pred_visibilities"].reshape( - num_frames, -1, num_pts - ) - else: - rendered_depths = [] - for i, points in zip( - data_dict["index_pairs"][:, -1], - pred_points.reshape(-1, pred_points.shape[2], 2), - ): - rendered_depths.append( - cv2.remap( - pred_train_depths[i].astype(np.float32), - points[None].astype(np.float32), # type: ignore - None, # type: ignore - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - )[0] - ) - rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts) - pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05 - - one_hot_eye = np.eye(target_points.shape[0])[..., 
None].repeat(num_pts, axis=-1) - evaluation_points = one_hot_eye == 0 - for i in range(num_frames): - evaluation_points[i, :, ~visibilities[i]] = False - occ_acc = np.sum( - np.equal(pred_visibilities, target_visibilities) & evaluation_points - ) / np.sum(evaluation_points) - all_frac_within = [] - all_jaccard = [] - - for thresh in [4, 8, 16, 32, 64]: - within_dist = np.sum( - np.square(pred_points - target_points), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, target_visibilities) - count_correct = np.sum(is_correct & evaluation_points) - count_visible_points = np.sum(target_visibilities & evaluation_points) - frac_correct = count_correct / count_visible_points - all_frac_within.append(frac_correct) - - true_positives = np.sum(is_correct & pred_visibilities & evaluation_points) - gt_positives = np.sum(target_visibilities & evaluation_points) - false_positives = (~target_visibilities) & pred_visibilities - false_positives = false_positives | ((~within_dist) & pred_visibilities) - false_positives = np.sum(false_positives & evaluation_points) - jaccard = true_positives / (gt_positives + false_positives) - all_jaccard.append(jaccard) - AJ = np.mean(all_jaccard) - APCK = np.mean(all_frac_within) - - print(f"2D tracking AJ: {AJ:.4f}") - print(f"2D tracking avg PCK: {APCK:.4f}") - print(f"2D tracking occlusion accuracy: {occ_acc:.4f}") - print("-----------------------------") - return AJ, APCK, occ_acc - - -def evaluate_nv(data_dict, result_dict): - device = "cuda" - psnr_metric = mPSNR().to(device) - ssim_metric = mSSIM().to(device) - lpips_metric = mLPIPS().to(device) - - val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device) - val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device) - pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device) - - for i in range(len(val_imgs)): - val_img = val_imgs[i] / 255.0 - pred_val_img = pred_val_imgs[i] / 255.0 - val_covisible = val_covisibles[i] / 255.0 - psnr_metric.update(val_img, pred_val_img, val_covisible) - ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None]) - mpsnr = psnr_metric.compute().item() - mssim = ssim_metric.compute().item() - mlpips = lpips_metric.compute().item() - print(f"NV mPSNR: {mpsnr:.4f}") - print(f"NV mSSIM: {mssim:.4f}") - print(f"NV mLPIPS: {mlpips:.4f}") - return mpsnr, mssim, mlpips - - -if __name__ == "__main__": - seq_names = args.seq_names - - epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], [] - AJ_all, APCK_all, occ_acc_all = [], [], [] - mpsnr_all, mssim_all, mlpips_all = [], [], [] - - for seq_name in seq_names: - print("=========================================") - print(f"Evaluating {seq_name}") - print("=========================================") - data_dir = osp.join(args.data_dir, seq_name) - if not osp.exists(data_dir): - data_dir = args.data_dir - if not osp.exists(data_dir): - raise ValueError(f"Data directory {data_dir} not found.") - result_dir = osp.join(args.result_dir, seq_name, "results/") - if not osp.exists(result_dir): - result_dir = osp.join(args.result_dir, "results/") - if not osp.exists(result_dir): - raise ValueError(f"Result directory {result_dir} not found.") - - with open(osp.join(data_dir, "splits/train.json")) as f: - train_names = json.load(f)["frame_names"] - with open(osp.join(data_dir, "splits/val.json")) as f: - val_names = json.load(f)["frame_names"] - - data_dict = load_data_dict(data_dir, train_names, 
val_names) - result_dict = load_result_dict(result_dir, val_names) - if result_dict["pred_keypoints_3d"] is not None: - epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict) - AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict) - epe_all.append(epe) - pck_3d_10cm_all.append(pck_3d_10cm) - pck_3d_5cm_all.append(pck_3d_5cm) - AJ_all.append(AJ) - APCK_all.append(APCK) - occ_acc_all.append(occ_acc) - if len(data_dict["val_imgs"]) > 0: - if result_dict["pred_val_imgs"] is None: - print("No NV results found.") - continue - mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict) - mpsnr_all.append(mpsnr) - mssim_all.append(mssim) - mlpips_all.append(mlpips) - - print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}") - print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}") - print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}") - print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}") - print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}") - print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}") - print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}") - print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}") - print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}") diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/__init__.py b/som_out/swing/code/2024-10-26-020013/flow3d/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/__init__.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index c917636d5f7a4707008b0e28a248d759e6fe30ea..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/configs.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/configs.cpython-310.pyc deleted file mode 100644 index 2b6ae0b4ff8067c198ad4f0c03844f53ed04943e..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/configs.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/init_utils.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/init_utils.cpython-310.pyc deleted file mode 100644 index 856201cd11752f668308a1035babf46a636fb8c0..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/init_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/loss_utils.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/loss_utils.cpython-310.pyc deleted file mode 100644 index 2f85f970122e95a997881c9ac7890fa71dd0ab0c..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/loss_utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/metrics.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/metrics.cpython-310.pyc deleted file mode 100644 index 601c58bb93fd5a98bfc091a5c5743d7dc9d0ec39..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/metrics.cpython-310.pyc and /dev/null differ diff --git 
a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/params.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/params.cpython-310.pyc deleted file mode 100644 index da9d0a9218ddfc1014aa763186142a2dab56f3b1..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/params.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/scene_model.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/scene_model.cpython-310.pyc deleted file mode 100644 index 5f92bb81ab0b7b9bd1257a283dffa23b3c9afa1f..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/scene_model.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc deleted file mode 100644 index e566648f62b52c944d537b8076fdf2dce22f65af..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/tensor_dataclass.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/trainer.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/trainer.cpython-310.pyc deleted file mode 100644 index 3ddfeac1c605ffd692a1c6dcee0d0c82252c2dd6..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/trainer.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/transforms.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/transforms.cpython-310.pyc deleted file mode 100644 index 204a38b59f55e32aa5e50bf82c2daa6834615c7e..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/transforms.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/validator.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/validator.cpython-310.pyc deleted file mode 100644 index 7a034d366bed455ad62af6c9d5a4c0a02c866c13..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/__pycache__/validator.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/configs.py b/som_out/swing/code/2024-10-26-020013/flow3d/configs.py deleted file mode 100644 index bda0fff5f7560d6d2cec6c1890c99acb7d74ee71..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/configs.py +++ /dev/null @@ -1,67 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class FGLRConfig: - means: float = 1.6e-4 - opacities: float = 1e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - motion_coefs: float = 1e-2 - - -@dataclass -class BGLRConfig: - means: float = 1.6e-4 - opacities: float = 5e-2 - scales: float = 5e-3 - quats: float = 1e-3 - colors: float = 1e-2 - - -@dataclass -class MotionLRConfig: - rots: float = 1.6e-4 - transls: float = 1.6e-4 - - -@dataclass -class SceneLRConfig: - fg: FGLRConfig - bg: BGLRConfig - motion_bases: MotionLRConfig - - -@dataclass -class LossesConfig: - w_rgb: float = 1.0 - w_depth_reg: float = 0.5 - w_depth_const: float = 0.1 - w_depth_grad: float = 1 - w_track: float = 2.0 - w_mask: float = 1.0 - 
w_smooth_bases: float = 0.1 - w_smooth_tracks: float = 2.0 - w_scale_var: float = 0.01 - w_z_accel: float = 1.0 - - -@dataclass -class OptimizerConfig: - max_steps: int = 5000 - ## Adaptive gaussian control - warmup_steps: int = 200 - control_every: int = 100 - reset_opacity_every_n_controls: int = 30 - stop_control_by_screen_steps: int = 4000 - stop_control_steps: int = 4000 - ### Densify. - densify_xys_grad_threshold: float = 0.0002 - densify_scale_threshold: float = 0.01 - densify_screen_threshold: float = 0.05 - stop_densify_steps: int = 15000 - ### Cull. - cull_opacity_threshold: float = 0.1 - cull_scale_threshold: float = 0.5 - cull_screen_threshold: float = 0.15 diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/data/__init__.py b/som_out/swing/code/2024-10-26-020013/flow3d/data/__init__.py deleted file mode 100644 index 45685985e850017fc12f61c5a149cd8cdc12d728..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/data/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import asdict, replace - -from torch.utils.data import Dataset - -from .base_dataset import BaseDataset -from .casual_dataset import CasualDataset, CustomDataConfig, DavisDataConfig -from .iphone_dataset import ( - iPhoneDataConfig, - iPhoneDataConfig_Crafter, - iPhoneDataset, - iPhoneDatasetKeypointView, - iPhoneDatasetVideoView, -) - - -def get_train_val_datasets( - data_cfg: iPhoneDataConfig | DavisDataConfig | CustomDataConfig | iPhoneDataConfig_Crafter, load_val: bool -) -> tuple[BaseDataset, Dataset | None, Dataset | None, Dataset | None]: - train_video_view = None - val_img_dataset = None - val_kpt_dataset = None - if isinstance(data_cfg, iPhoneDataConfig) or isinstance(data_cfg, iPhoneDataConfig_Crafter): - train_dataset = iPhoneDataset(**asdict(data_cfg)) - train_video_view = iPhoneDatasetVideoView(train_dataset) - if load_val: - val_img_dataset = ( - iPhoneDataset( - **asdict(replace(data_cfg, split="val", load_from_cache=True)) - ) - if train_dataset.has_validation - else None - ) - val_kpt_dataset = iPhoneDatasetKeypointView(train_dataset) - elif isinstance(data_cfg, DavisDataConfig) or isinstance( - data_cfg, CustomDataConfig - ): - train_dataset = CasualDataset(**asdict(data_cfg)) - else: - raise ValueError(f"Unknown data config: {data_cfg}") - return train_dataset, train_video_view, val_img_dataset, val_kpt_dataset diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/__init__.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a459b62f68a65b4c199e7f144cc292fd6cf9c84a..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/__init__.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/base_dataset.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/base_dataset.cpython-310.pyc deleted file mode 100644 index 64078ac09c63f899c53589c9eccddad0f180558f..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/base_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc deleted file mode 100644 index 
0e12be124c07bc40149b0b8bcafd1413e94f8fd0..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/casual_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/colmap.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/colmap.cpython-310.pyc deleted file mode 100644 index 5e0ab29fdf24ee2096dfbfe32b48c53463e1fe55..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/colmap.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc deleted file mode 100644 index e59615fbe617b6c227e53457cbb4a056b333e591..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/iphone_dataset.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/utils.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index 200f97129de5c41990fbcd698dd6cd1a21044542..0000000000000000000000000000000000000000 Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/data/__pycache__/utils.cpython-310.pyc and /dev/null differ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/data/base_dataset.py b/som_out/swing/code/2024-10-26-020013/flow3d/data/base_dataset.py deleted file mode 100644 index fa82267c3f4abdac6f7bb25365c8d465d522b690..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/data/base_dataset.py +++ /dev/null @@ -1,77 +0,0 @@ -from abc import abstractmethod - -import torch -from torch.utils.data import Dataset, default_collate - - -class BaseDataset(Dataset): - @property - @abstractmethod - def num_frames(self) -> int: ... - - @property - def keyframe_idcs(self) -> torch.Tensor: - return torch.arange(self.num_frames) - - @abstractmethod - def get_w2cs(self) -> torch.Tensor: ... - - @abstractmethod - def get_Ks(self) -> torch.Tensor: ... - - @abstractmethod - def get_image(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_depth(self, index: int) -> torch.Tensor: ... - - @abstractmethod - def get_mask(self, index: int) -> torch.Tensor: ... - - def get_img_wh(self) -> tuple[int, int]: ... - - @abstractmethod - def get_tracks_3d( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns 3D tracks: - coordinates (N, T, 3), - visibles (N, T), - invisibles (N, T), - confidences (N, T), - colors (N, 3) - """ - ... - - @abstractmethod - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Returns background points: - coordinates (N, 3), - normals (N, 3), - colors (N, 3) - """ - ... 
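- # Editorial sketch (hypothetical, not part of the original file): a concrete
- # subclass only has to fill in the abstract getters, e.g.:
- #   class ToyDataset(BaseDataset):
- #       def __init__(self, imgs, Ks, w2cs):
- #           self.imgs, self._Ks, self._w2cs = imgs, Ks, w2cs
- #       @property
- #       def num_frames(self) -> int:
- #           return len(self.imgs)
- #       def get_image(self, index):
- #           return self.imgs[index]
- #       def get_Ks(self):
- #           return self._Ks
- #       def get_w2cs(self):
- #           return self._w2cs
- #       ...
- # train_collate_fn below then batches such samples, keeping the listed
- # variable-length track keys as Python lists instead of stacking them.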
- - @staticmethod - def train_collate_fn(batch): - collated = {} - for k in batch[0]: - if k not in [ - "query_tracks_2d", - "target_ts", - "target_w2cs", - "target_Ks", - "target_tracks_2d", - "target_visibles", - "target_track_depths", - "target_invisibles", - "target_confidences", - ]: - collated[k] = default_collate([sample[k] for sample in batch]) - else: - collated[k] = [sample[k] for sample in batch] - return collated diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/data/casual_dataset.py b/som_out/swing/code/2024-10-26-020013/flow3d/data/casual_dataset.py deleted file mode 100644 index e378c6d2408c8ae5d2cdaf45cf6f77dead5c7bd2..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/data/casual_dataset.py +++ /dev/null @@ -1,498 +0,0 @@ -import os -from dataclasses import dataclass -from functools import partial -from typing import Literal, cast - -import cv2 -import imageio -import numpy as np -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from roma import roma -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.utils import ( - UINT16_MAX, - SceneNormDict, - get_tracks_3d_for_query_frame, - median_filter_2d, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class DavisDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "480p" - image_type: str = "JPEGImages" - mask_type: str = "Annotations" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything" #"aligned_depthcrafter" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 3 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -@dataclass -class CustomDataConfig: - seq_name: str - root_dir: str - start: int = 0 - end: int = -1 - res: str = "" - image_type: str = "images" - mask_type: str = "masks" - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - ] = "aligned_depth_anything" - camera_type: Literal["droid_recon"] = "droid_recon" - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir" - mask_erosion_radius: int = 7 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - num_targets_per_frame: int = 4 - load_from_cache: bool = False - - -class CasualDataset(BaseDataset): - def __init__( - self, - seq_name: str, - root_dir: str, - start: int = 0, - end: int = -1, - res: str = "480p", - image_type: str = "JPEGImages", - mask_type: str = "Annotations", - depth_type: Literal[ - "aligned_depth_anything", - "aligned_depth_anything_v2", - "depth_anything", - "depth_anything_v2", - "unidepth_disp", - "aligned_depthcrafter", - ] = "aligned_depth_anything", #"aligned_depthcrafter", - camera_type: Literal["droid_recon"] = "droid_recon", - track_2d_type: Literal["bootstapir", "tapir"] = "bootstapir", - mask_erosion_radius: int = 3, - scene_norm_dict: SceneNormDict | None = None, - num_targets_per_frame: int = 4, - load_from_cache: bool = False, - **_, - ): - super().__init__() - - self.seq_name = seq_name - self.root_dir = root_dir - self.res = res - self.depth_type = 
depth_type - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - self.has_validation = False - self.mask_erosion_radius = mask_erosion_radius - - self.img_dir = f"{root_dir}/{image_type}/{res}/{seq_name}" - self.img_ext = os.path.splitext(os.listdir(self.img_dir)[0])[1] - self.depth_dir = f"{root_dir}/{depth_type}/{res}/{seq_name}" - self.mask_dir = f"{root_dir}/{mask_type}/{res}/{seq_name}" - self.tracks_dir = f"{root_dir}/{track_2d_type}/{res}/{seq_name}" - self.cache_dir = f"{root_dir}/flow3d_preprocessed/{res}/{seq_name}" - # self.cache_dir = f"datasets/davis/flow3d_preprocessed/{res}/{seq_name}" - frame_names = [os.path.splitext(p)[0] for p in sorted(os.listdir(self.img_dir))] - - if end == -1: - end = len(frame_names) - self.start = start - self.end = end - self.frame_names = frame_names[start:end] - - self.imgs: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.depths: list[torch.Tensor | None] = [None for _ in self.frame_names] - self.masks: list[torch.Tensor | None] = [None for _ in self.frame_names] - - # load cameras - if camera_type == "droid_recon": - img = self.get_image(0) - H, W = img.shape[:2] - w2cs, Ks, tstamps = load_cameras( - f"{root_dir}/{camera_type}/{seq_name}.npy", H, W - ) - else: - raise ValueError(f"Unknown camera type: {camera_type}") - assert ( - len(frame_names) == len(w2cs) == len(Ks) - ), f"{len(frame_names)}, {len(w2cs)}, {len(Ks)}" - self.w2cs = w2cs[start:end] - self.Ks = Ks[start:end] - tmask = (tstamps >= start) & (tstamps < end) - self._keyframe_idcs = tstamps[tmask] - start - self.scale = 1 - - if scene_norm_dict is None: - cached_scene_norm_dict_path = os.path.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if os.path.exists(cached_scene_norm_dict_path) and self.load_from_cache: - guru.info("loading cached scene norm dict...") - scene_norm_dict = torch.load( - os.path.join(self.cache_dir, "scene_norm_dict.pth") - ) - else: - tracks_3d = self.get_tracks_3d(5000, step=self.num_frames // 10)[0] - scale, transfm = compute_scene_norm(tracks_3d, self.w2cs) - scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - os.makedirs(self.cache_dir, exist_ok=True) - torch.save(scene_norm_dict, cached_scene_norm_dict_path) - - # transform cameras - self.scene_norm_dict = cast(SceneNormDict, scene_norm_dict) - self.scale = self.scene_norm_dict["scale"] - transform = self.scene_norm_dict["transfm"] - guru.info(f"scene norm {self.scale=}, {transform=}") - self.w2cs = torch.einsum("nij,jk->nik", self.w2cs, torch.linalg.inv(transform)) - self.w2cs[:, :3, 3] /= self.scale - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - @property - def keyframe_idcs(self) -> torch.Tensor: - return self._keyframe_idcs - - def __len__(self): - return len(self.frame_names) - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_img_wh(self) -> tuple[int, int]: - return self.get_image(0).shape[1::-1] - - def get_image(self, index) -> torch.Tensor: - if self.imgs[index] is None: - self.imgs[index] = self.load_image(index) - img = cast(torch.Tensor, self.imgs[index]) - return img - - def get_mask(self, index) -> torch.Tensor: - if self.masks[index] is None: - self.masks[index] = self.load_mask(index) - mask = cast(torch.Tensor, self.masks[index]) - return mask - - def get_depth(self, index) -> torch.Tensor: - if self.depths[index] is None: - self.depths[index] = self.load_depth(index) - return self.depths[index] / 
self.scale - - def load_image(self, index) -> torch.Tensor: - path = f"{self.img_dir}/{self.frame_names[index]}{self.img_ext}" - return torch.from_numpy(imageio.imread(path)).float() / 255.0 - - def load_mask(self, index) -> torch.Tensor: - path = f"{self.mask_dir}/{self.frame_names[index]}.png" - r = self.mask_erosion_radius - mask = imageio.imread(path) - fg_mask = mask.reshape((*mask.shape[:2], -1)).max(axis=-1) > 0 - bg_mask = ~fg_mask - fg_mask_erode = cv2.erode( - fg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - bg_mask_erode = cv2.erode( - bg_mask.astype(np.uint8), np.ones((r, r), np.uint8), iterations=1 - ) - out_mask = np.zeros_like(fg_mask, dtype=np.float32) - out_mask[bg_mask_erode > 0] = -1 - out_mask[fg_mask_erode > 0] = 1 - return torch.from_numpy(out_mask).float() - - def load_depth(self, index) -> torch.Tensor: - path = f"{self.depth_dir}/{self.frame_names[index]}.npy" - disp = np.load(path) - depth = 1.0 / np.clip(disp, a_min=1e-6, a_max=1e6) - depth = torch.from_numpy(depth).float() - depth = median_filter_2d(depth[None, None], 11, 1)[0, 0] - return depth - - def load_target_tracks( - self, query_index: int, target_indices: list[int], dim: int = 1 - ): - """ - tracks are 2d, occs and uncertainties - :param dim (int), default 1: dimension to stack the time axis - return (N, T, 4) if dim=1, (T, N, 4) if dim=0 - """ - q_name = self.frame_names[query_index] - all_tracks = [] - for ti in target_indices: - t_name = self.frame_names[ti] - path = f"{self.tracks_dir}/{q_name}_{t_name}.npy" - tracks = np.load(path).astype(np.float32) - all_tracks.append(tracks) - return torch.from_numpy(np.stack(all_tracks, axis=dim)) - - def get_tracks_3d( - self, num_samples: int, start: int = 0, end: int = -1, step: int = 1, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - num_frames = self.num_frames - if end < 0: - end = num_frames + 1 + end - query_idcs = list(range(start, end, step)) - target_idcs = list(range(start, end, step)) - masks = torch.stack([self.get_mask(i) for i in target_idcs], dim=0) - fg_masks = (masks == 1).float() - depths = torch.stack([self.get_depth(i) for i in target_idcs], dim=0) - inv_Ks = torch.linalg.inv(self.Ks[target_idcs]) - c2ws = torch.linalg.inv(self.w2cs[target_idcs]) - - num_per_query_frame = int(np.ceil(num_samples / len(query_idcs))) - cur_num = 0 - tracks_all_queries = [] - for q_idx in query_idcs: - # (N, T, 4) - tracks_2d = self.load_target_tracks(q_idx, target_idcs) - num_sel = int( - min(num_per_query_frame, num_samples - cur_num, len(tracks_2d)) - ) - if num_sel < len(tracks_2d): - sel_idcs = np.random.choice(len(tracks_2d), num_sel, replace=False) - tracks_2d = tracks_2d[sel_idcs] - cur_num += tracks_2d.shape[0] - img = self.get_image(q_idx) - tidx = target_idcs.index(q_idx) - tracks_tuple = get_tracks_3d_for_query_frame( - tidx, img, tracks_2d, depths, fg_masks, inv_Ks, c2ws - ) - tracks_all_queries.append(tracks_tuple) - tracks_3d, colors, visibles, invisibles, confidences = map( - partial(torch.cat, dim=0), zip(*tracks_all_queries) - ) - return tracks_3d, visibles, invisibles, confidences, colors - - def get_bkgd_points( - self, - num_samples: int, - use_kf_tstamps: bool = True, - stride: int = 8, - down_rate: int = 8, - min_per_frame: int = 64, - **kwargs, - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - start = 0 - end = self.num_frames - H, W = self.get_image(0).shape[:2] - grid = torch.stack( - torch.meshgrid( - torch.arange(0, W, dtype=torch.float32), - 
torch.arange(0, H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - - if use_kf_tstamps: - query_idcs = self.keyframe_idcs.tolist() - else: - num_query_frames = self.num_frames // stride - query_endpts = torch.linspace(start, end, num_query_frames + 1) - query_idcs = ((query_endpts[:-1] + query_endpts[1:]) / 2).long().tolist() - - bg_geometry = [] - print(f"{query_idcs=}") - for query_idx in tqdm(query_idcs, desc="Loading bkgd points", leave=False): - img = self.get_image(query_idx) - depth = self.get_depth(query_idx) - bg_mask = self.get_mask(query_idx) < 0 - bool_mask = (bg_mask * (depth > 0)).to(torch.bool) - w2c = self.w2cs[query_idx] - K = self.Ks[query_idx] - - # get the bounding box of previous points that reproject into frame - # inefficient but works for now - bmax_x, bmax_y, bmin_x, bmin_y = 0, 0, W, H - for p3d, _, _ in bg_geometry: - if len(p3d) < 1: - continue - # reproject into current frame - p2d = torch.einsum( - "ij,jk,pk->pi", K, w2c[:3], F.pad(p3d, (0, 1), value=1.0) - ) - p2d = p2d[:, :2] / p2d[:, 2:].clamp(min=1e-6) - xmin, xmax = p2d[:, 0].min().item(), p2d[:, 0].max().item() - ymin, ymax = p2d[:, 1].min().item(), p2d[:, 1].max().item() - - bmin_x = min(bmin_x, int(xmin)) - bmin_y = min(bmin_y, int(ymin)) - bmax_x = max(bmax_x, int(xmax)) - bmax_y = max(bmax_y, int(ymax)) - - # don't include points that are covered by previous points - bmin_x = max(0, bmin_x) - bmin_y = max(0, bmin_y) - bmax_x = min(W, bmax_x) - bmax_y = min(H, bmax_y) - overlap_mask = torch.ones_like(bool_mask) - overlap_mask[bmin_y:bmax_y, bmin_x:bmax_x] = 0 - - bool_mask &= overlap_mask - if bool_mask.sum() < min_per_frame: - guru.debug(f"skipping {query_idx=}") - continue - - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - - num_sel = max(len(points) // down_rate, min_per_frame) - sel_idcs = np.random.choice(len(points), num_sel, replace=False) - points = points[sel_idcs] - point_normals = point_normals[sel_idcs] - point_colors = point_colors[sel_idcs] - guru.debug(f"{query_idx=} {points.shape=}") - bg_geometry.append((points, point_normals, point_colors)) - - bg_points, bg_normals, bg_colors = map( - partial(torch.cat, dim=0), zip(*bg_geometry) - ) - if len(bg_points) > num_samples: - sel_idcs = np.random.choice(len(bg_points), num_samples, replace=False) - bg_points = bg_points[sel_idcs] - bg_normals = bg_normals[sel_idcs] - bg_colors = bg_colors[sel_idcs] - - return bg_points, bg_normals, bg_colors - - def __getitem__(self, index: int): - index = np.random.randint(0, self.num_frames) - data = { - # (). - "frame_names": self.frame_names[index], - # (). - "ts": torch.tensor(index), - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). 
- "imgs": self.get_image(index), - "depths": self.get_depth(index), - } - tri_mask = self.get_mask(index) - valid_mask = tri_mask != 0 # not fg or bg - mask = tri_mask == 1 # fg mask - data["masks"] = mask.float() - data["valid_masks"] = valid_mask.float() - - # (P, 2) - query_tracks = self.load_target_tracks(index, [index])[:, 0, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4) - target_tracks = self.load_target_tracks(index, target_inds.tolist(), dim=0) - data["query_tracks_2d"] = query_tracks - data["target_ts"] = target_inds - data["target_w2cs"] = self.w2cs[target_inds] - data["target_Ks"] = self.Ks[target_inds] - data["target_tracks_2d"] = target_tracks[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info(target_tracks[..., 2], target_tracks[..., 3]) - # (N, H, W) - target_depths = torch.stack([self.get_depth(i) for i in target_inds], dim=0) - H, W = target_depths.shape[-2:] - data["target_track_depths"] = F.grid_sample( - target_depths[:, None], - normalize_coords(target_tracks[..., None, :2], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - return data - - -def load_cameras( - path: str, H: int, W: int -) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert os.path.exists(path), f"Camera file {path} does not exist." - recon = np.load(path, allow_pickle=True).item() - guru.debug(f"{recon.keys()=}") - traj_c2w = recon["traj_c2w"] # (N, 4, 4) - h, w = recon["img_shape"] - sy, sx = H / h, W / w - traj_w2c = np.linalg.inv(traj_c2w) - fx, fy, cx, cy = recon["intrinsics"] # (4,) - K = np.array([[fx * sx, 0, cx * sx], [0, fy * sy, cy * sy], [0, 0, 1]]) # (3, 3) - Ks = np.tile(K[None, ...], (len(traj_c2w), 1, 1)) # (N, 3, 3) - kf_tstamps = recon["tstamps"].astype("int") - return ( - torch.from_numpy(traj_w2c).float(), - torch.from_numpy(Ks).float(), - torch.from_numpy(kf_tstamps), - ) - - -def compute_scene_norm( - X: torch.Tensor, w2cs: torch.Tensor -) -> tuple[float, torch.Tensor]: - """ - :param X: [N*T, 3] - :param w2cs: [N, 4, 4] - """ - X = X.reshape(-1, 3) - scene_center = X.mean(dim=0) - X = X - scene_center[None] - min_scale = X.quantile(0.05, dim=0) - max_scale = X.quantile(0.95, dim=0) - scale = (max_scale - min_scale).max().item() / 2.0 - original_up = -F.normalize(w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - return scale, transfm - - -if __name__ == "__main__": - d = CasualDataset("bear", "/shared/vye/datasets/DAVIS", camera_type="droid_recon") diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/data/colmap.py b/som_out/swing/code/2024-10-26-020013/flow3d/data/colmap.py deleted file mode 100644 index bbfc67683bee48496671525aa259a5985b1b1483..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/data/colmap.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import struct -from dataclasses import dataclass -from pathlib import Path -from typing import Dict, Union - -import numpy as np - - -def get_colmap_camera_params(colmap_dir, img_files): - cameras = read_cameras_binary(colmap_dir + "/cameras.bin") - images = read_images_binary(colmap_dir + "/images.bin") - colmap_image_idcs = {v.name: k for k, v in 
images.items()} - img_names = [os.path.basename(img_file) for img_file in img_files] - num_imgs = len(img_names) - K_all = np.zeros((num_imgs, 4, 4)) - extrinsics_all = np.zeros((num_imgs, 4, 4)) - for idx, name in enumerate(img_names): - key = colmap_image_idcs[name] - image = images[key] - assert image.name == name - K, extrinsics = get_intrinsics_extrinsics(image, cameras) - K_all[idx] = K - extrinsics_all[idx] = extrinsics - - return K_all, extrinsics_all - - -@dataclass(frozen=True) -class CameraModel: - model_id: int - model_name: str - num_params: int - - -@dataclass(frozen=True) -class Camera: - id: int - model: str - width: int - height: int - params: np.ndarray - - -@dataclass(frozen=True) -class BaseImage: - id: int - qvec: np.ndarray - tvec: np.ndarray - camera_id: int - name: str - xys: np.ndarray - point3D_ids: np.ndarray - - -@dataclass(frozen=True) -class Point3D: - id: int - xyz: np.ndarray - rgb: np.ndarray - error: Union[float, np.ndarray] - image_ids: np.ndarray - point2D_idxs: np.ndarray - - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), -} -CAMERA_MODEL_IDS = dict( - [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] -) - - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
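- Example (editorial addition, not in the original docstring):
- read_next_bytes(fid, 8, "Q") unpacks one little-endian uint64, e.g. the
- leading num_cameras count at the start of cameras.bin.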
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - - -def read_cameras_text(path: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasText(const std::string& path) - void Reconstruction::ReadCamerasText(const std::string& path) - """ - cameras = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - camera_id = int(elems[0]) - model = elems[1] - width = int(elems[2]) - height = int(elems[3]) - params = np.array(tuple(map(float, elems[4:]))) - cameras[camera_id] = Camera( - id=camera_id, model=model, width=width, height=height, params=params - ) - return cameras - - -def read_cameras_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Camera]: - """ - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for camera_line_index in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ" - ) - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes( - fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params - ) - cameras[camera_id] = Camera( - id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params), - ) - assert len(cameras) == num_cameras - return cameras - - -def read_images_text(path: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesText(const std::string& path) - void Reconstruction::WriteImagesText(const std::string& path) - """ - images = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - image_id = int(elems[0]) - qvec = np.array(tuple(map(float, elems[1:5]))) - tvec = np.array(tuple(map(float, elems[5:8]))) - camera_id = int(elems[8]) - image_name = elems[9] - elems = fid.readline().split() - xys = np.column_stack( - [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, elems[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_images_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Image]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for image_index in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi" - ) - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - 
current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - x_y_id_s = read_next_bytes( - fid, - num_bytes=24 * num_points2D, - format_char_sequence="ddq" * num_points2D, - ) - xys = np.column_stack( - [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] - ) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, - qvec=qvec, - tvec=tvec, - camera_id=camera_id, - name=image_name, - xys=xys, - point3D_ids=point3D_ids, - ) - return images - - -def read_points3D_text(path: Union[str, Path]): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DText(const std::string& path) - void Reconstruction::WritePoints3DText(const std::string& path) - """ - points3D = {} - with open(path, "r") as fid: - while True: - line = fid.readline() - if not line: - break - line = line.strip() - if len(line) > 0 and line[0] != "#": - elems = line.split() - point3D_id = int(elems[0]) - xyz = np.array(tuple(map(float, elems[1:4]))) - rgb = np.array(tuple(map(int, elems[4:7]))) - error = float(elems[7]) - image_ids = np.array(tuple(map(int, elems[8::2]))) - point2D_idxs = np.array(tuple(map(int, elems[9::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def read_points3d_binary(path_to_model_file: Union[str, Path]) -> Dict[int, Point3D]: - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadPoints3DBinary(const std::string& path) - void Reconstruction::WritePoints3DBinary(const std::string& path) - """ - points3D = {} - with open(path_to_model_file, "rb") as fid: - num_points = read_next_bytes(fid, 8, "Q")[0] - for point_line_index in range(num_points): - binary_point_line_properties = read_next_bytes( - fid, num_bytes=43, format_char_sequence="QdddBBBd" - ) - point3D_id = binary_point_line_properties[0] - xyz = np.array(binary_point_line_properties[1:4]) - rgb = np.array(binary_point_line_properties[4:7]) - error = np.array(binary_point_line_properties[7]) - track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ - 0 - ] - track_elems = read_next_bytes( - fid, - num_bytes=8 * track_length, - format_char_sequence="ii" * track_length, - ) - image_ids = np.array(tuple(map(int, track_elems[0::2]))) - point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) - points3D[point3D_id] = Point3D( - id=point3D_id, - xyz=xyz, - rgb=rgb, - error=error, - image_ids=image_ids, - point2D_idxs=point2D_idxs, - ) - return points3D - - -def qvec2rotmat(qvec): - return np.array( - [ - [ - 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], - ], - [ - 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], - ], - [ - 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, - ], - ] - ) - - -def get_intrinsics_extrinsics(img, cameras): - # world to cam transformation - R = qvec2rotmat(img.qvec) - # translation - t = img.tvec - cam = cameras[img.camera_id] - - if cam.model in ("SIMPLE_PINHOLE", "SIMPLE_RADIAL", "RADIAL"): - fx = 
fy = cam.params[0] - cx = cam.params[1] - cy = cam.params[2] - elif cam.model in ( - "PINHOLE", - "OPENCV", - "OPENCV_FISHEYE", - "FULL_OPENCV", - ): - fx = cam.params[0] - fy = cam.params[1] - cx = cam.params[2] - cy = cam.params[3] - else: - raise Exception("Camera model not supported") - - # intrinsics - K = np.identity(4) - K[0, 0] = fx - K[1, 1] = fy - K[0, 2] = cx - K[1, 2] = cy - - extrinsics = np.eye(4) - extrinsics[:3, :3] = R - extrinsics[:3, 3] = t - return K, extrinsics diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/data/iphone_dataset.py b/som_out/swing/code/2024-10-26-020013/flow3d/data/iphone_dataset.py deleted file mode 100644 index fb1580040e231f05c1395852842781807f277288..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/data/iphone_dataset.py +++ /dev/null @@ -1,865 +0,0 @@ -import json -import os -import os.path as osp -from dataclasses import dataclass -from glob import glob -from itertools import product -from typing import Literal - -import imageio.v3 as iio -import numpy as np -import roma -import torch -import torch.nn.functional as F -import tyro -from loguru import logger as guru -from torch.utils.data import Dataset -from tqdm import tqdm - -from flow3d.data.base_dataset import BaseDataset -from flow3d.data.colmap import get_colmap_camera_params -from flow3d.data.utils import ( - SceneNormDict, - masked_median_blur, - normal_from_depth_image, - normalize_coords, - parse_tapir_track_info, -) -from flow3d.transforms import rt_to_mat4 - - -@dataclass -class iPhoneDataConfig: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_anything_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -@dataclass -class iPhoneDataConfig_Crafter: - data_dir: str - start: int = 0 - end: int = -1 - split: Literal["train", "val"] = "train" - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap" - camera_type: Literal["original", "refined"] = "refined" - use_median_filter: bool = False - num_targets_per_frame: int = 4 - scene_norm_dict: tyro.conf.Suppress[SceneNormDict | None] = None - load_from_cache: bool = False - skip_load_imgs: bool = False - - -class iPhoneDataset(BaseDataset): - def __init__( - self, - data_dir: str, - start: int = 0, - end: int = -1, - factor: int = 1, - split: Literal["train", "val"] = "train", - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - "depth_crafter_colmap", - ] = "depth_crafter_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - scene_norm_dict: SceneNormDict | None = None, - load_from_cache: bool = False, - skip_load_imgs: bool = False, - **_, - ): - super().__init__() - - self.data_dir = data_dir - self.training = split == "train" - self.split = split - self.factor = factor - self.start = start - self.end = end - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.scene_norm_dict = 
scene_norm_dict - self.load_from_cache = load_from_cache - self.cache_dir = osp.join(data_dir, "flow3d_preprocessed", "cache") - os.makedirs(self.cache_dir, exist_ok=True) - - print("!!!!depth_type!!!", depth_type) - - # Test if the current data has validation set. - with open(osp.join(data_dir, "splits", "val.json")) as f: - split_dict = json.load(f) - self.has_validation = len(split_dict["frame_names"]) > 0 - - # Load metadata. - with open(osp.join(data_dir, "splits", f"{split}.json")) as f: - split_dict = json.load(f) - full_len = len(split_dict["frame_names"]) - end = min(end, full_len) if end > 0 else full_len - self.end = end - self.frame_names = split_dict["frame_names"][start:end] - time_ids = [t for t in split_dict["time_ids"] if t >= start and t < end] - self.time_ids = torch.tensor(time_ids) - start - guru.info(f"{self.time_ids.min()=} {self.time_ids.max()=}") - # with open(osp.join(data_dir, "dataset.json")) as f: - # dataset_dict = json.load(f) - # self.num_frames = dataset_dict["num_exemplars"] - guru.info(f"{self.num_frames=}") - with open(osp.join(data_dir, "extra.json")) as f: - extra_dict = json.load(f) - self.fps = float(extra_dict["fps"]) - - # Load cameras. - if self.camera_type == "original": - Ks, w2cs = [], [] - for frame_name in self.frame_names: - with open(osp.join(data_dir, "camera", f"{frame_name}.json")) as f: - camera_dict = json.load(f) - focal_length = camera_dict["focal_length"] - principal_point = camera_dict["principal_point"] - Ks.append( - [ - [focal_length, 0.0, principal_point[0]], - [0.0, focal_length, principal_point[1]], - [0.0, 0.0, 1.0], - ] - ) - orientation = np.array(camera_dict["orientation"]) - position = np.array(camera_dict["position"]) - w2cs.append( - np.block( - [ - [orientation, -orientation @ position[:, None]], - [np.zeros((1, 3)), np.ones((1, 1))], - ] - ).astype(np.float32) - ) - self.Ks = torch.tensor(Ks) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(np.array(w2cs)) - elif self.camera_type == "refined": - Ks, w2cs = get_colmap_camera_params( - osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"), - [frame_name + ".png" for frame_name in self.frame_names], - ) - self.Ks = torch.from_numpy(Ks[:, :3, :3].astype(np.float32)) - self.Ks[:, :2] /= factor - self.w2cs = torch.from_numpy(w2cs.astype(np.float32)) - if not skip_load_imgs: - # Load images. - imgs = torch.from_numpy( - np.array( - [ - iio.imread( - osp.join(self.data_dir, f"rgb/{factor}x/{frame_name}.png") - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} images", - leave=False, - ) - ], - ) - ) - self.imgs = imgs[..., :3] / 255.0 - self.valid_masks = imgs[..., 3] / 255.0 - # Load masks. - self.masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/track_anything/", - f"{factor}x/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - if self.training: - # Load depths. 
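- # Editorial sketch (an assumption based on the code below): the non-lidar
- # depth files apparently store inverse depth, so load_depth clamps and
- # inverts them, roughly
- #   depth = 1.0 / np.clip(disp, 1e-3, None)   # e.g. disp 0.5 -> depth 2.0
- # and the per-frame maxima are later clamped to 2.5x their median to tame
- # outliers in the monocular estimates.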
- def load_depth(frame_name): - if self.depth_type == "lidar": - depth = np.load( - osp.join( - self.data_dir, - f"depth/{factor}x/{frame_name}.npy", - ) - )[..., 0] - else: - depth = np.load( - osp.join( - self.data_dir, - # f"flow3d_preprocessed/aligned_{self.depth_type}_allrect/", - # TODO: 1023 - f"flow3d_preprocessed/aligned_{self.depth_type}/", - # f"flow3d_preprocessed/noaligned_{self.depth_type}/", - f"{factor}x/{frame_name}.npy", - ) - ) - depth[depth < 1e-3] = 1e-3 - depth = 1.0 / depth - return depth - - self.depths = torch.from_numpy( - np.array( - [ - load_depth(frame_name) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} depths", - leave=False, - ) - ], - np.float32, - ) - ) - max_depth_values_per_frame = self.depths.reshape( - self.num_frames, -1 - ).max(1)[0] - max_depth_value = max_depth_values_per_frame.median() * 2.5 - print("max_depth_value", max_depth_value) - self.depths = torch.clamp(self.depths, 0, max_depth_value) - # Median filter depths. - # NOTE(hangg): This operator is very expensive. - if self.use_median_filter: - for i in tqdm( - range(self.num_frames), desc="Processing depths", leave=False - ): - depth = masked_median_blur( - self.depths[[i]].unsqueeze(1).to("cuda"), - ( - self.masks[[i]] - * self.valid_masks[[i]] - * (self.depths[[i]] > 0) - ) - .unsqueeze(1) - .to("cuda"), - )[0, 0].cpu() - self.depths[i] = depth * self.masks[i] + self.depths[i] * ( - 1 - self.masks[i] - ) - # Load the query pixels from 2D tracks. - self.query_tracks_2d = [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{factor}x/{frame_name}_{frame_name}.npy", - ) - ).astype(np.float32) - ) - for frame_name in self.frame_names - ] - guru.info( - f"{len(self.query_tracks_2d)=} {self.query_tracks_2d[0].shape=}" - ) - - # Load sam features. - # sam_feat_dir = osp.join( - # data_dir, f"flow3d_preprocessed/sam_features/{factor}x" - # ) - # assert osp.exists(sam_feat_dir), f"SAM features not exist!" - # sam_features, original_size, input_size = load_sam_features( - # sam_feat_dir, self.frame_names - # ) - # guru.info(f"{sam_features.shape=} {original_size=} {input_size=}") - # self.sam_features = sam_features - # self.sam_original_size = original_size - # self.sam_input_size = input_size - else: - # Load covisible masks. - self.covisible_masks = ( - torch.from_numpy( - np.array( - [ - iio.imread( - osp.join( - self.data_dir, - "flow3d_preprocessed/covisible/", - f"{factor}x/{split}/{frame_name}.png", - ) - ) - for frame_name in tqdm( - self.frame_names, - desc=f"Loading {self.split} covisible masks", - leave=False, - ) - ], - ) - ) - / 255.0 - ) - - if self.scene_norm_dict is None: - cached_scene_norm_dict_path = osp.join( - self.cache_dir, "scene_norm_dict.pth" - ) - if osp.exists(cached_scene_norm_dict_path) and self.load_from_cache: - print("loading cached scene norm dict...") - self.scene_norm_dict = torch.load( - osp.join(self.cache_dir, "scene_norm_dict.pth") - ) - elif self.training: - # Compute the scene scale and transform for normalization. - # Normalize the scene based on the foreground 3D tracks. 
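- # Editorial sketch (not in the original file): the normalization below
- # centers the tracks, sets the scale from the widest axis of their 5-95%
- # quantile box, and rotates the mean camera "up" vector onto +z:
- #   scale = (q95 - q05).max() / 2
- #   axis = normalize(up x z);  angle = acos(up . z)
- #   R = rotvec_to_rotmat(axis * angle)
- # e.g. a 5-95% extent of 2 units on the widest axis gives scale = 1.0.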
- subsampled_tracks_3d = self.get_tracks_3d( - num_samples=10000, step=self.num_frames // 10, show_pbar=False - )[0] - scene_center = subsampled_tracks_3d.mean((0, 1)) - tracks_3d_centered = subsampled_tracks_3d - scene_center - min_scale = tracks_3d_centered.quantile(0.05, dim=0) - max_scale = tracks_3d_centered.quantile(0.95, dim=0) - scale = torch.max(max_scale - min_scale).item() / 2.0 - original_up = -F.normalize(self.w2cs[:, 1, :3].mean(0), dim=-1) - target_up = original_up.new_tensor([0.0, 0.0, 1.0]) - R = roma.rotvec_to_rotmat( - F.normalize(original_up.cross(target_up, dim=-1), dim=-1) - * original_up.dot(target_up).acos_() - ) - transfm = rt_to_mat4(R, torch.einsum("ij,j->i", -R, scene_center)) - self.scene_norm_dict = SceneNormDict(scale=scale, transfm=transfm) - torch.save(self.scene_norm_dict, cached_scene_norm_dict_path) - else: - raise ValueError("scene_norm_dict must be provided for validation.") - - # Normalize the scene. - scale = self.scene_norm_dict["scale"] - transfm = self.scene_norm_dict["transfm"] - self.w2cs = self.w2cs @ torch.linalg.inv(transfm) - self.w2cs[:, :3, 3] /= scale - if self.training and not skip_load_imgs: - self.depths /= scale - - if not skip_load_imgs: - guru.info( - f"{self.imgs.shape=} {self.valid_masks.shape=} {self.masks.shape=}" - ) - - @property - def num_frames(self) -> int: - return len(self.frame_names) - - def __len__(self): - return self.imgs.shape[0] - - def get_w2cs(self) -> torch.Tensor: - return self.w2cs - - def get_Ks(self) -> torch.Tensor: - return self.Ks - - def get_image(self, index: int) -> torch.Tensor: - return self.imgs[index] - - def get_depth(self, index: int) -> torch.Tensor: - return self.depths[index] - - def get_masks(self, index: int) -> torch.Tensor: - return self.masks[index] - - def get_img_wh(self) -> tuple[int, int]: - return iio.imread( - osp.join(self.data_dir, f"rgb/{self.factor}x/{self.frame_names[0]}.png") - ).shape[1::-1] - - # def get_sam_features(self) -> list[torch.Tensor, tuple[int, int], tuple[int, int]]: - # return self.sam_features, self.sam_original_size, self.sam_input_size - - def get_tracks_3d( - self, num_samples: int, step: int = 1, show_pbar: bool = True, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """Get 3D tracks from the dataset. - - Args: - num_samples (int | None): The number of samples to fetch. If None, - fetch all samples. If not None, fetch roughly a same number of - samples across each frame. Note that this might result in - number of samples less than what is specified. - step (int): The step to temporally subsample the track. - """ - assert ( - self.split == "train" - ), "fetch_tracks_3d is only available for the training split." - cached_track_3d_path = osp.join(self.cache_dir, f"tracks_3d_{num_samples}.pth") - if osp.exists(cached_track_3d_path) and step == 1 and self.load_from_cache: - print("loading cached 3d tracks data...") - start, end = self.start, self.end - cached_track_3d_data = torch.load(cached_track_3d_path) - tracks_3d, visibles, invisibles, confidences, track_colors = ( - cached_track_3d_data["tracks_3d"][:, start:end], - cached_track_3d_data["visibles"][:, start:end], - cached_track_3d_data["invisibles"][:, start:end], - cached_track_3d_data["confidences"][:, start:end], - cached_track_3d_data["track_colors"], - ) - return tracks_3d, visibles, invisibles, confidences, track_colors - - # Load 2D tracks. 
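- # Editorial note (not in the original file): the sampling budget below is
- # split evenly across the subsampled query frames, with the remainder
- # assigned to the last one; e.g. num_samples=10000 over 9 query frames
- # gives floor(10000/9)=1111 tracks for the first 8 frames and
- # 10000 - 8*1111 = 1112 for the last.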
- raw_tracks_2d = [] - candidate_frames = list(range(0, self.num_frames, step)) - num_sampled_frames = len(candidate_frames) - for i in ( - tqdm(candidate_frames, desc="Loading 2D tracks", leave=False) - if show_pbar - else candidate_frames - ): - curr_num_samples = self.query_tracks_2d[i].shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - track_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - track_sels = np.arange(0, curr_num_samples) - curr_tracks_2d = [] - for j in range(0, self.num_frames, step): - if i == j: - target_tracks_2d = self.query_tracks_2d[i] - else: - target_tracks_2d = torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[i]}_" - f"{self.frame_names[j]}.npy", - ) - ).astype(np.float32) - ) - curr_tracks_2d.append(target_tracks_2d[track_sels]) - raw_tracks_2d.append(torch.stack(curr_tracks_2d, dim=1)) - guru.info(f"{step=} {len(raw_tracks_2d)=} {raw_tracks_2d[0].shape=}") - - # Process 3D tracks. - inv_Ks = torch.linalg.inv(self.Ks)[::step] - c2ws = torch.linalg.inv(self.w2cs)[::step] - H, W = self.imgs.shape[1:3] - filtered_tracks_3d, filtered_visibles, filtered_track_colors = [], [], [] - filtered_invisibles, filtered_confidences = [], [] - masks = self.masks * self.valid_masks * (self.depths > 0) - masks = (masks > 0.5).float() - for i, tracks_2d in enumerate(raw_tracks_2d): - tracks_2d = tracks_2d.swapdims(0, 1) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - visibles, invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - track_depths = F.grid_sample( - self.depths[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - padding_mode="border", - )[:, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths - ) - tracks_3d = torch.einsum( - "nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0) - )[..., :3] - # Filter out out-of-mask tracks. - is_in_masks = ( - F.grid_sample( - masks[::step, None], - normalize_coords(tracks_2d[..., None, :], H, W), - align_corners=True, - ).squeeze() - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - # Get track's color from the query frame. 
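# Illustrative sketch, not from the original flow3d source: the unprojection
# used above on a single frame. A pixel (x, y) with depth d lifts to camera
# space as d * K^-1 [x, y, 1]^T and then to world space via the
# camera-to-world matrix. Function name and shapes are assumed.
import torch
import torch.nn.functional as F

def unproject(pix: torch.Tensor, depth: torch.Tensor,
              inv_K: torch.Tensor, c2w: torch.Tensor) -> torch.Tensor:
    """pix: (P, 2) pixels, depth: (P,), inv_K: (3, 3), c2w: (4, 4) -> (P, 3)."""
    homog = F.pad(pix, (0, 1), value=1.0)                      # (P, 3)
    cam = torch.einsum("ij,pj->pi", inv_K, homog) * depth[:, None]
    world = torch.einsum("ij,pj->pi", c2w, F.pad(cam, (0, 1), value=1.0))
    return world[..., :3]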
- track_colors = ( - F.grid_sample( - self.imgs[i * step : i * step + 1].permute(0, 3, 1, 2), - normalize_coords(tracks_2d[i : i + 1, None, :], H, W), - align_corners=True, - padding_mode="border", - ) - .squeeze() - .T - ) - # at least visible 5% of the time, otherwise discard - visible_counts = visibles.sum(0) - valid = visible_counts >= min( - int(0.05 * self.num_frames), - visible_counts.float().quantile(0.1).item(), - ) - - filtered_tracks_3d.append(tracks_3d[:, valid]) - filtered_visibles.append(visibles[:, valid]) - filtered_invisibles.append(invisibles[:, valid]) - filtered_confidences.append(confidences[:, valid]) - filtered_track_colors.append(track_colors[valid]) - - filtered_tracks_3d = torch.cat(filtered_tracks_3d, dim=1).swapdims(0, 1) - filtered_visibles = torch.cat(filtered_visibles, dim=1).swapdims(0, 1) - filtered_invisibles = torch.cat(filtered_invisibles, dim=1).swapdims(0, 1) - filtered_confidences = torch.cat(filtered_confidences, dim=1).swapdims(0, 1) - filtered_track_colors = torch.cat(filtered_track_colors, dim=0) - if step == 1: - torch.save( - { - "tracks_3d": filtered_tracks_3d, - "visibles": filtered_visibles, - "invisibles": filtered_invisibles, - "confidences": filtered_confidences, - "track_colors": filtered_track_colors, - }, - cached_track_3d_path, - ) - return ( - filtered_tracks_3d, - filtered_visibles, - filtered_invisibles, - filtered_confidences, - filtered_track_colors, - ) - - def get_bkgd_points( - self, num_samples: int, **kwargs - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - H, W = self.imgs.shape[1:3] - grid = torch.stack( - torch.meshgrid( - torch.arange(W, dtype=torch.float32), - torch.arange(H, dtype=torch.float32), - indexing="xy", - ), - dim=-1, - ) - candidate_frames = list(range(self.num_frames)) - num_sampled_frames = len(candidate_frames) - bkgd_points, bkgd_point_normals, bkgd_point_colors = [], [], [] - for i in tqdm(candidate_frames, desc="Loading bkgd points", leave=False): - img = self.imgs[i] - depth = self.depths[i] - bool_mask = ((1.0 - self.masks[i]) * self.valid_masks[i] * (depth > 0)).to( - torch.bool - ) - w2c = self.w2cs[i] - K = self.Ks[i] - points = ( - torch.einsum( - "ij,pj->pi", - torch.linalg.inv(K), - F.pad(grid[bool_mask], (0, 1), value=1.0), - ) - * depth[bool_mask][:, None] - ) - points = torch.einsum( - "ij,pj->pi", torch.linalg.inv(w2c)[:3], F.pad(points, (0, 1), value=1.0) - ) - point_normals = normal_from_depth_image(depth, K, w2c)[bool_mask] - point_colors = img[bool_mask] - curr_num_samples = points.shape[0] - num_samples_per_frame = ( - int(np.floor(num_samples / num_sampled_frames)) - if i != candidate_frames[-1] - else num_samples - - (num_sampled_frames - 1) - * int(np.floor(num_samples / num_sampled_frames)) - ) - if num_samples_per_frame < curr_num_samples: - point_sels = np.random.choice( - curr_num_samples, (num_samples_per_frame,), replace=False - ) - else: - point_sels = np.arange(0, curr_num_samples) - bkgd_points.append(points[point_sels]) - bkgd_point_normals.append(point_normals[point_sels]) - bkgd_point_colors.append(point_colors[point_sels]) - bkgd_points = torch.cat(bkgd_points, dim=0) - bkgd_point_normals = torch.cat(bkgd_point_normals, dim=0) - bkgd_point_colors = torch.cat(bkgd_point_colors, dim=0) - return bkgd_points, bkgd_point_normals, bkgd_point_colors - - def get_video_dataset(self) -> Dataset: - return iPhoneDatasetVideoView(self) - - def __getitem__(self, index: int): - if self.training: - index = np.random.randint(0, self.num_frames) - data = { - # (). 
- "frame_names": self.frame_names[index], - # (). - "ts": self.time_ids[index], - # (4, 4). - "w2cs": self.w2cs[index], - # (3, 3). - "Ks": self.Ks[index], - # (H, W, 3). - "imgs": self.imgs[index], - # (H, W). - "valid_masks": self.valid_masks[index], - # (H, W). - "masks": self.masks[index], - } - if self.training: - # (H, W). - data["depths"] = self.depths[index] - # (P, 2). - data["query_tracks_2d"] = self.query_tracks_2d[index][:, :2] - target_inds = torch.from_numpy( - np.random.choice( - self.num_frames, (self.num_targets_per_frame,), replace=False - ) - ) - # (N, P, 4). - target_tracks_2d = torch.stack( - [ - torch.from_numpy( - np.load( - osp.join( - self.data_dir, - "flow3d_preprocessed/2d_tracks/", - f"{self.factor}x/" - f"{self.frame_names[index]}_" - f"{self.frame_names[target_index.item()]}.npy", - ) - ).astype(np.float32) - ) - for target_index in target_inds - ], - dim=0, - ) - # (N,). - target_ts = self.time_ids[target_inds] - data["target_ts"] = target_ts - # (N, 4, 4). - data["target_w2cs"] = self.w2cs[target_ts] - # (N, 3, 3). - data["target_Ks"] = self.Ks[target_ts] - # (N, P, 2). - data["target_tracks_2d"] = target_tracks_2d[..., :2] - # (N, P). - ( - data["target_visibles"], - data["target_invisibles"], - data["target_confidences"], - ) = parse_tapir_track_info( - target_tracks_2d[..., 2], target_tracks_2d[..., 3] - ) - # (N, P). - data["target_track_depths"] = F.grid_sample( - self.depths[target_inds, None], - normalize_coords( - target_tracks_2d[..., None, :2], - self.imgs.shape[1], - self.imgs.shape[2], - ), - align_corners=True, - padding_mode="border", - )[:, 0, :, 0] - else: - # (H, W). - data["covisible_masks"] = self.covisible_masks[index] - return data - - def preprocess(self, data): - return data - - -class iPhoneDatasetKeypointView(Dataset): - """Return a dataset view of the annotated keypoints.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - assert self.dataset.split == "train" - # Load 2D keypoints. - keypoint_paths = sorted( - glob(osp.join(self.dataset.data_dir, "keypoint/2x/train/0_*.json")) - ) - keypoints = [] - for keypoint_path in keypoint_paths: - with open(keypoint_path) as f: - keypoints.append(json.load(f)) - time_ids = [ - int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths - ] - # only use time ids that are in the dataset. 
- start = self.dataset.start - time_ids = [t - start for t in time_ids if t - start in self.dataset.time_ids] - self.time_ids = torch.tensor(time_ids) - self.time_pairs = torch.tensor(list(product(self.time_ids, repeat=2))) - self.index_pairs = torch.tensor( - list(product(range(len(self.time_ids)), repeat=2)) - ) - self.keypoints = torch.tensor(keypoints, dtype=torch.float32) - self.keypoints[..., :2] *= 2.0 / self.dataset.factor - - def __len__(self): - return len(self.time_pairs) - - def __getitem__(self, index: int): - ts = self.time_pairs[index] - return { - "ts": ts, - "w2cs": self.dataset.w2cs[ts], - "Ks": self.dataset.Ks[ts], - "imgs": self.dataset.imgs[ts], - "keypoints": self.keypoints[self.index_pairs[index]], - } - - -class iPhoneDatasetVideoView(Dataset): - """Return a dataset view of the video trajectory.""" - - def __init__(self, dataset: iPhoneDataset): - super().__init__() - self.dataset = dataset - self.fps = self.dataset.fps - assert self.dataset.split == "train" - - def __len__(self): - return self.dataset.num_frames - - def __getitem__(self, index): - return { - "frame_names": self.dataset.frame_names[index], - "ts": index, - "w2cs": self.dataset.w2cs[index], - "Ks": self.dataset.Ks[index], - "imgs": self.dataset.imgs[index], - "depths": self.dataset.depths[index], - "masks": self.dataset.masks[index], - } - - -""" -class iPhoneDataModule(BaseDataModule[iPhoneDataset]): - def __init__( - self, - data_dir: str, - factor: int = 1, - start: int = 0, - end: int = -1, - depth_type: Literal[ - "midas", - "depth_anything", - "lidar", - "depth_anything_colmap", - ] = "depth_anything_colmap", - camera_type: Literal["original", "refined"] = "refined", - use_median_filter: bool = False, - num_targets_per_frame: int = 1, - load_from_cache: bool = False, - **kwargs, - ): - super().__init__(dataset_cls=iPhoneDataset, **kwargs) - self.data_dir = data_dir - self.start = start - self.end = end - self.factor = factor - self.depth_type = depth_type - self.camera_type = camera_type - self.use_median_filter = use_median_filter - self.num_targets_per_frame = num_targets_per_frame - self.load_from_cache = load_from_cache - - self.val_loader_tasks = ["img", "keypoint"] - - def setup(self, *_, **__) -> None: - guru.info("Loading train dataset...") - self.train_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=True, - split="train", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - num_targets_per_frame=self.num_targets_per_frame, - max_steps=self.max_steps * self.batch_size, - load_from_cache=self.load_from_cache, - ) - if self.train_dataset.has_validation: - guru.info("Loading val dataset...") - self.val_dataset = self.dataset_cls( - data_dir=self.data_dir, - training=False, - split="val", - start=self.start, - end=self.end, - factor=self.factor, - depth_type=self.depth_type, # type: ignore - camera_type=self.camera_type, # type: ignore - use_median_filter=self.use_median_filter, - scene_norm_dict=self.train_dataset.scene_norm_dict, - load_from_cache=self.load_from_cache, - ) - else: - # Dummy validation set. 
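# Illustrative sketch, not from the original flow3d source: the pairing logic
# above enumerates every ordered pair of annotated time ids (including i == j)
# with itertools.product, so keypoint error can be evaluated between all
# annotated frames.
from itertools import product

import torch

time_ids = torch.tensor([0, 4, 8])                             # assumed ids
time_pairs = torch.tensor(list(product(time_ids.tolist(), repeat=2)))
# time_pairs.shape == (9, 2): (0, 0), (0, 4), (0, 8), (4, 0), ...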
- self.val_dataset = TensorDataset(torch.zeros(0)) # type: ignore - self.keypoint_dataset = iPhoneDatasetKeypointView(self.train_dataset) - self.video_dataset = self.train_dataset.get_video_dataset() - guru.success("Loading finished!") - - def train_dataloader(self) -> DataLoader: - return DataLoader( - self.train_dataset, - batch_size=self.batch_size, - num_workers=self.num_workers, - collate_fn=iPhoneDataset.train_collate_fn, - ) - - def val_dataloader(self) -> list[DataLoader]: - return [DataLoader(self.val_dataset), DataLoader(self.keypoint_dataset)] - """ diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/data/utils.py b/som_out/swing/code/2024-10-26-020013/flow3d/data/utils.py deleted file mode 100644 index 00841ce5236c9a3b98e4a7c7d0ad94a137b9df13..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/data/utils.py +++ /dev/null @@ -1,360 +0,0 @@ -from typing import List, Optional, Tuple, TypedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair, _quadruple - -UINT16_MAX = 65535 - - -class SceneNormDict(TypedDict): - scale: float - transfm: torch.Tensor - - -def to_device(batch, device): - if isinstance(batch, dict): - return {k: to_device(v, device) for k, v in batch.items()} - if isinstance(batch, (list, tuple)): - return [to_device(v, device) for v in batch] - if isinstance(batch, torch.Tensor): - return batch.to(device) - return batch - - -def normalize_coords(coords, h, w): - assert coords.shape[-1] == 2 - return coords / torch.tensor([w - 1.0, h - 1.0], device=coords.device) * 2 - 1.0 - - -def postprocess_occlusions(occlusions, expected_dist): - """Postprocess occlusions to boolean visible flag. - - Args: - occlusions: [-inf, inf], np.float32 - expected_dist: [-inf, inf], np.float32 - - Returns: - visibles: bool - """ - - def sigmoid(x): - if isinstance(x, np.ndarray): - return 1 / (1 + np.exp(-x)) - else: - return torch.sigmoid(x) - - visibles = (1 - sigmoid(occlusions)) * (1 - sigmoid(expected_dist)) > 0.5 - return visibles - - -def parse_tapir_track_info(occlusions, expected_dist): - """ - return: - valid_visible: mask of visible & confident points - valid_invisible: mask of invisible & confident points - confidence: clamped confidence scores (all < 0.5 -> 0) - """ - visibility = 1 - F.sigmoid(occlusions) - confidence = 1 - F.sigmoid(expected_dist) - valid_visible = visibility * confidence > 0.5 - valid_invisible = (1 - visibility) * confidence > 0.5 - # set all confidence < 0.5 to 0 - confidence = confidence * (valid_visible | valid_invisible).float() - return valid_visible, valid_invisible, confidence - - -def get_tracks_3d_for_query_frame( - query_index: int, - query_img: torch.Tensor, - tracks_2d: torch.Tensor, - depths: torch.Tensor, - masks: torch.Tensor, - inv_Ks: torch.Tensor, - c2ws: torch.Tensor, -): - """ - :param query_index (int) - :param query_img [H, W, 3] - :param tracks_2d [N, T, 4] - :param depths [T, H, W] - :param masks [T, H, W] - :param inv_Ks [T, 3, 3] - :param c2ws [T, 4, 4] - returns ( - tracks_3d [N, T, 3] - track_colors [N, 3] - visibles [N, T] - invisibles [N, T] - confidences [N, T] - ) - """ - T, H, W = depths.shape - query_img = query_img[None].permute(0, 3, 1, 2) # (1, 3, H, W) - tracks_2d = tracks_2d.swapaxes(0, 1) # (T, N, 4) - tracks_2d, occs, dists = ( - tracks_2d[..., :2], - tracks_2d[..., 2], - tracks_2d[..., 3], - ) - # visibles = postprocess_occlusions(occs, dists) - # (T, N), (T, N), (T, N) - visibles,
invisibles, confidences = parse_tapir_track_info(occs, dists) - # Unproject 2D tracks to 3D. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - track_depths = F.grid_sample( - depths[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - padding_mode="border", - )[:, 0, 0] - tracks_3d = ( - torch.einsum( - "nij,npj->npi", - inv_Ks, - F.pad(tracks_2d, (0, 1), value=1.0), - ) - * track_depths[..., None] - ) - tracks_3d = torch.einsum("nij,npj->npi", c2ws, F.pad(tracks_3d, (0, 1), value=1.0))[ - ..., :3 - ] - # Filter out out-of-mask tracks. - # (T, 1, H, W), (T, 1, N, 2) -> (T, 1, 1, N) - is_in_masks = ( - F.grid_sample( - masks[:, None], - normalize_coords(tracks_2d[:, None], H, W), - align_corners=True, - )[:, 0, 0] - == 1 - ) - visibles *= is_in_masks - invisibles *= is_in_masks - confidences *= is_in_masks.float() - - # valid if in the fg mask at least 40% of the time - # in_mask_counts = is_in_masks.sum(0) - # t = 0.25 - # thresh = min(t * T, in_mask_counts.float().quantile(t).item()) - # valid = in_mask_counts > thresh - valid = is_in_masks[query_index] - # valid if visible 5% of the time - visible_counts = visibles.sum(0) - valid = valid & ( - visible_counts - >= min( - int(0.05 * T), - visible_counts.float().quantile(0.1).item(), - ) - ) - - # Get track's color from the query frame. - # (1, 3, H, W), (1, 1, N, 2) -> (1, 3, 1, N) -> (N, 3) - track_colors = F.grid_sample( - query_img, - normalize_coords(tracks_2d[query_index : query_index + 1, None], H, W), - align_corners=True, - padding_mode="border", - )[0, :, 0].T - return ( - tracks_3d[:, valid].swapdims(0, 1), - track_colors[valid], - visibles[:, valid].swapdims(0, 1), - invisibles[:, valid].swapdims(0, 1), - confidences[:, valid].swapdims(0, 1), - ) - - -def _get_padding(x, k, stride, padding, same: bool): - if same: - ih, iw = x.size()[2:] - if ih % stride[0] == 0: - ph = max(k[0] - stride[0], 0) - else: - ph = max(k[0] - (ih % stride[0]), 0) - if iw % stride[1] == 0: - pw = max(k[1] - stride[1], 0) - else: - pw = max(k[1] - (iw % stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = padding - return padding - - -def median_filter_2d(x, kernel_size=3, stride=1, padding=1, same: bool = True): - """ - :param x [B, C, H, W] - """ - k = _pair(kernel_size) - stride = _pair(stride) # convert to tuple - padding = _quadruple(padding) # convert to l, r, t, b - # using existing pytorch functions and tensor ops so that we get autograd, - # would likely be more efficient to implement from scratch at C/Cuda level - x = F.pad(x, _get_padding(x, k, stride, padding, same), mode="reflect") - x = x.unfold(2, k[0], stride[0]).unfold(3, k[1], stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x - - -def masked_median_blur(image, mask, kernel_size=11): - """ - Args: - image: [B, C, H, W] - mask: [B, C, H, W] - kernel_size: int - """ - assert image.shape == mask.shape - if not isinstance(image, torch.Tensor): - raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") - - if not len(image.shape) == 4: - raise ValueError(f"Invalid input shape, we expect BxCxHxW. 
Got: {image.shape}") - - padding: Tuple[int, int] = _compute_zero_padding((kernel_size, kernel_size)) - - # prepare kernel - kernel: torch.Tensor = get_binary_kernel2d((kernel_size, kernel_size)).to(image) - b, c, h, w = image.shape - - # map the local window to single vector - features: torch.Tensor = F.conv2d( - image.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - masks: torch.Tensor = F.conv2d( - mask.reshape(b * c, 1, h, w), kernel, padding=padding, stride=1 - ) - features = features.view(b, c, -1, h, w).permute( - 0, 1, 3, 4, 2 - ) # BxCxHxWx(K_h * K_w) - min_value, max_value = features.min(), features.max() - masks = masks.view(b, c, -1, h, w).permute(0, 1, 3, 4, 2) # BxCxHxWx(K_h * K_w) - index_invalid = (1 - masks).nonzero(as_tuple=True) - index_b, index_c, index_h, index_w, index_k = index_invalid - features[(index_b[::2], index_c[::2], index_h[::2], index_w[::2], index_k[::2])] = ( - min_value - ) - features[ - (index_b[1::2], index_c[1::2], index_h[1::2], index_w[1::2], index_k[1::2]) - ] = max_value - # compute the median along the feature axis - median: torch.Tensor = torch.median(features, dim=-1)[0] - - return median - - -def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]: - r"""Utility function that computes zero padding tuple.""" - computed: List[int] = [(k - 1) // 2 for k in kernel_size] - return computed[0], computed[1] - - -def get_binary_kernel2d( - window_size: tuple[int, int] | int, - *, - device: Optional[torch.device] = None, - dtype: torch.dtype = torch.float32, -) -> torch.Tensor: - """ - from kornia - Create a binary kernel to extract the patches. - If the window size is HxW will create a (H*W)x1xHxW kernel. - """ - ky, kx = _unpack_2d_ks(window_size) - - window_range = kx * ky - - kernel = torch.zeros((window_range, window_range), device=device, dtype=dtype) - idx = torch.arange(window_range, device=device) - kernel[idx, idx] += 1.0 - return kernel.view(window_range, 1, ky, kx) - - -def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]: - if isinstance(kernel_size, int): - ky = kx = kernel_size - else: - assert len(kernel_size) == 2, "2D Kernel size should have a length of 2." - ky, kx = kernel_size - - ky = int(ky) - kx = int(kx) - - return (ky, kx) - - -## Functions from GaussianShader.
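# Illustrative sketch, not from the original flow3d source: the binary-kernel
# trick behind masked_median_blur above. A (k*k, 1, k, k) one-hot filter bank
# makes conv2d unfold each local window into the channel axis; a median over
# that axis is then a median blur (zero-padded at the borders here).
import torch
import torch.nn.functional as F

k = 3
kernel = torch.zeros(k * k, k * k)
kernel[torch.arange(k * k), torch.arange(k * k)] = 1.0
kernel = kernel.view(k * k, 1, k, k)              # one tap per output channel

img = torch.rand(1, 1, 8, 8)
patches = F.conv2d(img, kernel, padding=k // 2)   # (1, k*k, 8, 8)
median = patches.median(dim=1).values             # (1, 8, 8) median blur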
-def ndc_2_cam(ndc_xyz, intrinsic, W, H): - inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) - cam_z = ndc_xyz[..., 2:3] - cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z - cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) - cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) - return cam_xyz - - -def depth2point_cam(sampled_depth, ref_intrinsic): - B, N, C, H, W = sampled_depth.shape - valid_z = sampled_depth - valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / ( - W - 1 - ) - valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / ( - H - 1 - ) - valid_y, valid_x = torch.meshgrid(valid_y, valid_x, indexing="ij") - # B,N,H,W - valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) - valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) - ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view( - B, N, C, H, W, 3 - ) # 1, 1, 5, 512, 640, 3 - cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 - return ndc_xyz, cam_xyz - - -def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix): - # depth_image: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - _, xyz_cam = depth2point_cam( - depth_image[None, None, None, ...], intrinsic_matrix[None, ...] - ) - xyz_cam = xyz_cam.reshape(-1, 3) - xyz_world = torch.cat( - [xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1 - ) @ torch.inverse(extrinsic_matrix).transpose(0, 1) - xyz_world = xyz_world[..., :3] - - return xyz_world - - -def depth_pcd2normal(xyz): - hd, wd, _ = xyz.shape - bottom_point = xyz[..., 2:hd, 1 : wd - 1, :] - top_point = xyz[..., 0 : hd - 2, 1 : wd - 1, :] - right_point = xyz[..., 1 : hd - 1, 2:wd, :] - left_point = xyz[..., 1 : hd - 1, 0 : wd - 2, :] - left_to_right = right_point - left_point - bottom_to_top = top_point - bottom_point - xyz_normal = torch.cross(left_to_right, bottom_to_top, dim=-1) - xyz_normal = torch.nn.functional.normalize(xyz_normal, p=2, dim=-1) - xyz_normal = torch.nn.functional.pad( - xyz_normal.permute(2, 0, 1), (1, 1, 1, 1), mode="constant" - ).permute(1, 2, 0) - return xyz_normal - - -def normal_from_depth_image(depth, intrinsic_matrix, extrinsic_matrix): - # depth: (H, W), intrinsic_matrix: (3, 3), extrinsic_matrix: (4, 4) - # xyz_normal: (H, W, 3) - xyz_world = depth2point_world(depth, intrinsic_matrix, extrinsic_matrix) # (HxW, 3) - xyz_world = xyz_world.reshape(*depth.shape, 3) - xyz_normal = depth_pcd2normal(xyz_world) - - return xyz_normal diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/init_utils.py b/som_out/swing/code/2024-10-26-020013/flow3d/init_utils.py deleted file mode 100644 index 16b8e035761be88f50d38f6e935a3537f8c74dad..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/init_utils.py +++ /dev/null @@ -1,644 +0,0 @@ -import time -from typing import Literal - -import cupy as cp -import imageio.v3 as iio -import numpy as np - -# from pytorch3d.ops import sample_farthest_points -import roma -import torch -import torch.nn.functional as F -from cuml import HDBSCAN, KMeans -from loguru import logger as guru -from matplotlib.pyplot import get_cmap -from tqdm import tqdm -from viser import ViserServer - -from flow3d.loss_utils import ( - compute_accel_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - get_weights_for_procrustes, - knn, - masked_l1_loss, -) -from flow3d.params import GaussianParams, MotionBases -from flow3d.tensor_dataclass import StaticObservations, TrackObservations -from 
flow3d.transforms import cont_6d_to_rmat, rt_to_mat4, solve_procrustes -from flow3d.vis.utils import draw_keypoints_video, get_server, project_2d_tracks - - -def init_fg_from_tracks_3d( - cano_t: int, tracks_3d: TrackObservations, motion_coefs: torch.Tensor -) -> GaussianParams: - """ - using dataclasses individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_fg = tracks_3d.xyz.shape[0] - - # Initialize gaussian colors. - colors = torch.logit(tracks_3d.colors) - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(tracks_3d.xyz[:, cano_t], 3) - dists = torch.from_numpy(dists) - scales = dists.mean(dim=-1, keepdim=True) - scales = scales.clamp(torch.quantile(scales, 0.05), torch.quantile(scales, 0.95)) - scales = torch.log(scales.repeat(1, 3)) - # Initialize gaussian means. - means = tracks_3d.xyz[:, cano_t] - # Initialize gaussian orientations as random. - quats = torch.rand(num_fg, 4) - # Initialize gaussian opacities. - opacities = torch.logit(torch.full((num_fg,), 0.7)) - gaussians = GaussianParams(means, quats, scales, colors, opacities, motion_coefs) - return gaussians - - -def init_bg( - points: StaticObservations, -) -> GaussianParams: - """ - using dataclasses instead of individual tensors so we know they're consistent - and are always masked/filtered together - """ - num_init_bg_gaussians = points.xyz.shape[0] - bg_scene_center = points.xyz.mean(0) - bg_points_centered = points.xyz - bg_scene_center - bg_min_scale = bg_points_centered.quantile(0.05, dim=0) - bg_max_scale = bg_points_centered.quantile(0.95, dim=0) - bg_scene_scale = torch.max(bg_max_scale - bg_min_scale).item() / 2.0 - bkdg_colors = torch.logit(points.colors) - - # Initialize gaussian scales: find the average of the three nearest - # neighbors in the first frame for each point and use that as the - # scale. - dists, _ = knn(points.xyz, 3) - dists = torch.from_numpy(dists) - bg_scales = dists.mean(dim=-1, keepdim=True) - bkdg_scales = torch.log(bg_scales.repeat(1, 3)) - - bg_means = points.xyz - - # Initialize gaussian orientations by normals. 
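# Illustrative sketch, not from the original flow3d source: the k-NN scale
# initialization above, with sklearn's NearestNeighbors standing in for the
# project's knn() helper. Scales are seeded in log space (they pass through
# exp later) from each point's mean distance to its 3 nearest neighbors.
import numpy as np
import torch
from sklearn.neighbors import NearestNeighbors

def init_log_scales(xyz: torch.Tensor, k: int = 3) -> torch.Tensor:
    """xyz: (N, 3) points -> (N, 3) log-scales."""
    nn = NearestNeighbors(n_neighbors=k + 1).fit(xyz.numpy())
    dists, _ = nn.kneighbors(xyz.numpy())          # (N, k+1); column 0 is self
    mean_d = torch.from_numpy(dists[:, 1:].astype(np.float32)).mean(-1, keepdim=True)
    mean_d = mean_d.clamp(mean_d.quantile(0.05), mean_d.quantile(0.95))
    return torch.log(mean_d.repeat(1, 3))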
- local_normals = points.normals.new_tensor([[0.0, 0.0, 1.0]]).expand_as( - points.normals - ) - bg_quats = roma.rotvec_to_unitquat( - F.normalize(local_normals.cross(points.normals), dim=-1) - * (local_normals * points.normals).sum(-1, keepdim=True).acos_() - ).roll(1, dims=-1) - bg_opacities = torch.logit(torch.full((num_init_bg_gaussians,), 0.7)) - gaussians = GaussianParams( - bg_means, - bg_quats, - bkdg_scales, - bkdg_colors, - bg_opacities, - scene_center=bg_scene_center, - scene_scale=bg_scene_scale, - ) - return gaussians - - -def init_motion_params_with_procrustes( - tracks_3d: TrackObservations, - num_bases: int, - rot_type: Literal["quat", "6d"], - cano_t: int, - cluster_init_method: str = "kmeans", - min_mean_weight: float = 0.1, - vis: bool = False, - port: int | None = None, -) -> tuple[MotionBases, torch.Tensor, TrackObservations]: - device = tracks_3d.xyz.device - num_frames = tracks_3d.xyz.shape[1] - # sample centers and get initial se3 motion bases by solving procrustes - means_cano = tracks_3d.xyz[:, cano_t].clone() # [num_gaussians, 3] - - # remove outliers - scene_center = means_cano.median(dim=0).values - print(f"{scene_center=}") - dists = torch.norm(means_cano - scene_center, dim=-1) - dists_th = torch.quantile(dists, 0.95) - valid_mask = dists < dists_th - - # remove tracks that are not visible in any frame - valid_mask = valid_mask & tracks_3d.visibles.any(dim=1) - print(f"{valid_mask.sum()=}") - - tracks_3d = tracks_3d.filter_valid(valid_mask) - - if vis and port is not None: - server = get_server(port) - try: - pts = tracks_3d.xyz.cpu().numpy() - clrs = tracks_3d.colors.cpu().numpy() - while True: - for t in range(num_frames): - server.scene.add_point_cloud("points", pts[:, t], clrs) - time.sleep(0.3) - except KeyboardInterrupt: - pass - - means_cano = means_cano[valid_mask] - - sampled_centers, num_bases, labels = sample_initial_bases_centers( - cluster_init_method, cano_t, tracks_3d, num_bases - ) - - # assign each point to the label to compute the cluster weight - ids, counts = labels.unique(return_counts=True) - ids = ids[counts > 100] - num_bases = len(ids) - sampled_centers = sampled_centers[:, ids] - print(f"{num_bases=} {sampled_centers.shape=}") - - # compute basis weights from the distance to the cluster centers - dists2centers = torch.norm(means_cano[:, None] - sampled_centers, dim=-1) - motion_coefs = 10 * torch.exp(-dists2centers) - - init_rots, init_ts = [], [] - - if rot_type == "quat": - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device) - rot_dim = 4 - else: - id_rot = torch.tensor([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=device) - rot_dim = 6 - - init_rots = id_rot.reshape(1, 1, rot_dim).repeat(num_bases, num_frames, 1) - init_ts = torch.zeros(num_bases, num_frames, 3, device=device) - errs_before = np.full((num_bases, num_frames), -1.0) - errs_after = np.full((num_bases, num_frames), -1.0) - - tgt_ts = list(range(cano_t - 1, -1, -1)) + list(range(cano_t, num_frames)) - print(f"{tgt_ts=}") - skipped_ts = {} - for n, cluster_id in enumerate(ids): - mask_in_cluster = labels == cluster_id - cluster = tracks_3d.xyz[mask_in_cluster].transpose( - 0, 1 - ) # [num_frames, n_pts, 3] - visibilities = tracks_3d.visibles[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - confidences = tracks_3d.confidences[mask_in_cluster].swapaxes( - 0, 1 - ) # [num_frames, n_pts] - weights = get_weights_for_procrustes(cluster, visibilities) - prev_t = cano_t - cluster_skip_ts = [] - for cur_t in tgt_ts: - # compute pairwise transform from cano_t - 
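# Illustrative sketch, not from the original flow3d source: solve_procrustes
# lives in flow3d.transforms and is not shown in this diff, so below is a
# generic weighted Procrustes (Kabsch) solve of the same flavor -- the
# best-fit rotation/translation mapping src onto dst under per-point weights.
import torch

def weighted_kabsch(src: torch.Tensor, dst: torch.Tensor, w: torch.Tensor):
    """src, dst: (N, 3); w: (N,) nonnegative weights -> R (3, 3), t (3,)."""
    w = w / w.sum().clamp(min=1e-8)
    mu_s = (w[:, None] * src).sum(0)
    mu_d = (w[:, None] * dst).sum(0)
    cov = (dst - mu_d).T @ (w[:, None] * (src - mu_s))         # (3, 3)
    U, _, Vt = torch.linalg.svd(cov)
    d = torch.sign(torch.det(U @ Vt))                          # avoid reflections
    S = torch.diag(torch.stack([torch.ones_like(d), torch.ones_like(d), d]))
    R = U @ S @ Vt
    t = mu_d - R @ mu_s
    return R, t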
procrustes_weights = ( - weights[cano_t] - * weights[cur_t] - * (confidences[cano_t] + confidences[cur_t]) - / 2 - ) - if procrustes_weights.sum() < min_mean_weight * num_frames: - init_rots[n, cur_t] = init_rots[n, prev_t] - init_ts[n, cur_t] = init_ts[n, prev_t] - cluster_skip_ts.append(cur_t) - else: - se3, (err, err_before) = solve_procrustes( - cluster[cano_t], - cluster[cur_t], - weights=procrustes_weights, - enforce_se3=True, - rot_type=rot_type, - ) - init_rot, init_t, _ = se3 - assert init_rot.shape[-1] == rot_dim - # double cover - if rot_type == "quat" and torch.linalg.norm( - init_rot - init_rots[n][prev_t] - ) > torch.linalg.norm(-init_rot - init_rots[n][prev_t]): - init_rot = -init_rot - init_rots[n, cur_t] = init_rot - init_ts[n, cur_t] = init_t - if np.isnan(err): - print(f"{cur_t=} {err=}") - print(f"{procrustes_weights.isnan().sum()=}") - if np.isnan(err_before): - print(f"{cur_t=} {err_before=}") - print(f"{procrustes_weights.isnan().sum()=}") - errs_after[n, cur_t] = err - errs_before[n, cur_t] = err_before - prev_t = cur_t - skipped_ts[cluster_id.item()] = cluster_skip_ts - - guru.info(f"{skipped_ts=}") - guru.info( - "procrustes init median error: {:.5f} => {:.5f}".format( - np.median(errs_before[errs_before > 0]), - np.median(errs_after[errs_after > 0]), - ) - ) - guru.info( - "procrustes init mean error: {:.5f} => {:.5f}".format( - np.mean(errs_before[errs_before > 0]), np.mean(errs_after[errs_after > 0]) - ) - ) - guru.info(f"{init_rots.shape=}, {init_ts.shape=}, {motion_coefs.shape=}") - - if vis: - server = get_server(port) - center_idcs = torch.argmin(dists2centers, dim=0) - print(f"{dists2centers.shape=} {center_idcs.shape=}") - vis_se3_init_3d(server, init_rots, init_ts, means_cano[center_idcs]) - vis_tracks_3d(server, tracks_3d.xyz[center_idcs].numpy(), name="center_tracks") - import ipdb - - ipdb.set_trace() - - bases = MotionBases(init_rots, init_ts) - return bases, motion_coefs, tracks_3d - - -def run_initial_optim( - fg: GaussianParams, - bases: MotionBases, - tracks_3d: TrackObservations, - Ks: torch.Tensor, - w2cs: torch.Tensor, - num_iters: int = 1000, - use_depth_range_loss: bool = False, -): - """ - :param motion_rots: [num_bases, num_frames, 4|6] - :param motion_transls: [num_bases, num_frames, 3] - :param motion_coefs: [num_bases, num_frames] - :param means: [num_gaussians, 3] - """ - optimizer = torch.optim.Adam( - [ - {"params": bases.params["rots"], "lr": 1e-2}, - {"params": bases.params["transls"], "lr": 3e-2}, - {"params": fg.params["motion_coefs"], "lr": 1e-2}, - {"params": fg.params["means"], "lr": 1e-3}, - ], - ) - scheduler = torch.optim.lr_scheduler.ExponentialLR( - optimizer, gamma=0.1 ** (1 / num_iters) - ) - G = fg.params.means.shape[0] - num_frames = bases.num_frames - device = bases.params["rots"].device - - w_smooth_func = lambda i, min_v, max_v, th: ( - min_v if i <= th else (max_v - min_v) * (i - th) / (num_iters - th) + min_v - ) - - gt_2d, gt_depth = project_2d_tracks( - tracks_3d.xyz.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - # (G, T, 2) - gt_2d = gt_2d.swapaxes(0, 1) - # (G, T) - gt_depth = gt_depth.swapaxes(0, 1) - - ts = torch.arange(0, num_frames, device=device) - ts_clamped = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts_clamped - 1, ts_clamped, ts_clamped + 1)) # i (3B,) - - pbar = tqdm(range(0, num_iters)) - for i in pbar: - coefs = fg.get_coefs() - transfms = bases.compute_transforms(ts, coefs) - positions = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0,
1), value=1.0), - ) - - loss = 0.0 - track_3d_loss = masked_l1_loss( - positions, - tracks_3d.xyz, - (tracks_3d.visibles.float() * tracks_3d.confidences)[..., None], - ) - loss += track_3d_loss * 1.0 - - pred_2d, pred_depth = project_2d_tracks( - positions.swapaxes(0, 1), Ks, w2cs, return_depth=True - ) - pred_2d = pred_2d.swapaxes(0, 1) - pred_depth = pred_depth.swapaxes(0, 1) - - loss_2d = ( - masked_l1_loss( - pred_2d, - gt_2d, - (tracks_3d.invisibles.float() * tracks_3d.confidences)[..., None], - quantile=0.95, - ) - / Ks[0, 0, 0] - ) - loss += 0.5 * loss_2d - - if use_depth_range_loss: - near_depths = torch.quantile(gt_depth, 0.0, dim=0, keepdim=True) - far_depths = torch.quantile(gt_depth, 0.98, dim=0, keepdim=True) - loss_depth_in_range = 0 - if (pred_depth < near_depths).any(): - loss_depth_in_range += (near_depths - pred_depth)[ - pred_depth < near_depths - ].mean() - if (pred_depth > far_depths).any(): - loss_depth_in_range += (pred_depth - far_depths)[ - pred_depth > far_depths - ].mean() - - loss += loss_depth_in_range * w_smooth_func(i, 0.05, 0.5, 400) - - motion_coef_sparse_loss = 1 - (coefs**2).sum(dim=-1).mean() - loss += motion_coef_sparse_loss * 0.01 - - # motion basis should be smooth. - w_smooth = w_smooth_func(i, 0.01, 0.1, 400) - small_acc_loss = compute_se3_smoothness_loss( - bases.params["rots"], bases.params["transls"] - ) - loss += small_acc_loss * w_smooth - - small_acc_loss_tracks = compute_accel_loss(positions) - loss += small_acc_loss_tracks * w_smooth * 0.5 - - transfms_nbs = bases.compute_transforms(ts_neighbors, coefs) - means_nbs = torch.einsum( - "pnij,pj->pni", transfms_nbs, F.pad(fg.params["means"], (0, 1), value=1.0) - ) # (G, 3n, 3) - means_nbs = means_nbs.reshape(means_nbs.shape[0], 3, -1, 3) # [G, 3, n, 3] - z_accel_loss = compute_z_acc_loss(means_nbs, w2cs) - loss += z_accel_loss * 0.1 - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - pbar.set_description( - f"{loss.item():.3f} " - f"{track_3d_loss.item():.3f} " - f"{motion_coef_sparse_loss.item():.3f} " - f"{small_acc_loss.item():.3f} " - f"{small_acc_loss_tracks.item():.3f} " - f"{z_accel_loss.item():.3f} " - ) - - -def random_quats(N: int) -> torch.Tensor: - u = torch.rand(N, 1) - v = torch.rand(N, 1) - w = torch.rand(N, 1) - quats = torch.cat( - [ - torch.sqrt(1.0 - u) * torch.sin(2.0 * np.pi * v), - torch.sqrt(1.0 - u) * torch.cos(2.0 * np.pi * v), - torch.sqrt(u) * torch.sin(2.0 * np.pi * w), - torch.sqrt(u) * torch.cos(2.0 * np.pi * w), - ], - -1, - ) - return quats - - -def compute_means(ts, fg: GaussianParams, bases: MotionBases): - transfms = bases.compute_transforms(ts, fg.get_coefs()) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(fg.params["means"], (0, 1), value=1.0), - ) - return means - - -def vis_init_params( - server, - fg: GaussianParams, - bases: MotionBases, - name="init_params", - num_vis: int = 100, -): - idcs = np.random.choice(fg.num_gaussians, num_vis) - labels = np.linspace(0, 1, num_vis) - ts = torch.arange(bases.num_frames, device=bases.params["rots"].device) - with torch.no_grad(): - pred_means = compute_means(ts, fg, bases) - vis_means = pred_means[idcs].detach().cpu().numpy() - vis_tracks_3d(server, vis_means, labels, name=name) - - -@torch.no_grad() -def vis_se3_init_3d(server, init_rots, init_ts, basis_centers): - """ - :param init_rots: [num_bases, num_frames, 4|6] - :param init_ts: [num_bases, num_frames, 3] - :param basis_centers: [num_bases, 3] - """ - # visualize the initial centers across time - rot_dim = 
init_rots.shape[-1] - assert rot_dim in [4, 6] - num_bases = init_rots.shape[0] - assert init_ts.shape[0] == num_bases - assert basis_centers.shape[0] == num_bases - labels = np.linspace(0, 1, num_bases) - if rot_dim == 4: - quats = F.normalize(init_rots, dim=-1, p=2) - rmats = roma.unitquat_to_rotmat(quats.roll(-1, dims=-1)) - else: - rmats = cont_6d_to_rmat(init_rots) - transls = init_ts - transfms = rt_to_mat4(rmats, transls) - center_tracks3d = torch.einsum( - "bnij,bj->bni", transfms, F.pad(basis_centers, (0, 1), value=1.0) - )[..., :3] - vis_tracks_3d(server, center_tracks3d.cpu().numpy(), labels, name="se3_centers") - - -@torch.no_grad() -def vis_tracks_2d_video( - path, - imgs: np.ndarray, - tracks_3d: np.ndarray, - Ks: np.ndarray, - w2cs: np.ndarray, - occs=None, - radius: int = 3, -): - num_tracks = tracks_3d.shape[0] - labels = np.linspace(0, 1, num_tracks) - cmap = get_cmap("gist_rainbow") - colors = cmap(labels)[:, :3] - tracks_2d = ( - project_2d_tracks(tracks_3d.swapaxes(0, 1), Ks, w2cs).cpu().numpy() # type: ignore - ) - frames = np.asarray( - draw_keypoints_video(imgs, tracks_2d, colors, occs, radius=radius) - ) - iio.imwrite(path, frames, fps=15) - - -def vis_tracks_3d( - server: ViserServer, - vis_tracks: np.ndarray, - vis_label: np.ndarray | None = None, - name: str = "tracks", -): - """ - :param vis_tracks (np.ndarray): (N, T, 3) - :param vis_label (np.ndarray): (N) - """ - cmap = get_cmap("gist_rainbow") - if vis_label is None: - vis_label = np.linspace(0, 1, len(vis_tracks)) - colors = cmap(np.asarray(vis_label))[:, :3] - guru.info(f"{colors.shape=}, {vis_tracks.shape=}") - N, T = vis_tracks.shape[:2] - vis_tracks = np.asarray(vis_tracks) - for i in range(N): - server.scene.add_spline_catmull_rom( - f"/{name}/{i}/spline", vis_tracks[i], color=colors[i], segments=T - 1 - ) - server.scene.add_point_cloud( - f"/{name}/{i}/start", - vis_tracks[i, [0]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="circle", - ) - server.scene.add_point_cloud( - f"/{name}/{i}/end", - vis_tracks[i, [-1]], - colors=colors[i : i + 1], - point_size=0.05, - point_shape="diamond", - ) - - -def sample_initial_bases_centers( - mode: str, cano_t: int, tracks_3d: TrackObservations, num_bases: int -): - """ - :param mode: "farthest" | "hdbscan" | "kmeans" - :param tracks_3d: [G, T, 3] - :param cano_t: canonical index - :param num_bases: number of SE3 bases - """ - assert mode in ["farthest", "hdbscan", "kmeans"] - means_canonical = tracks_3d.xyz[:, cano_t].clone() - # if mode == "farthest": - # vis_mask = tracks_3d.visibles[:, cano_t] - # sampled_centers, _ = sample_farthest_points( - # means_canonical[vis_mask][None], - # K=num_bases, - # random_start_point=True, - # ) # [1, num_bases, 3] - # dists2centers = torch.norm(means_canonical[:, None] - sampled_centers, dim=-1).T - # return sampled_centers, num_bases, dists2centers - - # linearly interpolate missing 3d points - xyz = cp.asarray(tracks_3d.xyz) - print(f"{xyz.shape=}") - visibles = cp.asarray(tracks_3d.visibles) - - num_tracks = xyz.shape[0] - xyz_interp = batched_interp_masked(xyz, visibles) - - # num_vis = 50 - # server = get_server(port=8890) - # idcs = np.random.choice(num_tracks, num_vis) - # labels = np.linspace(0, 1, num_vis) - # vis_tracks_3d(server, tracks_3d.xyz[idcs].get(), labels, name="raw_tracks") - # vis_tracks_3d(server, xyz_interp[idcs].get(), labels, name="interp_tracks") - # import ipdb; ipdb.set_trace() - - velocities = xyz_interp[:, 1:] - xyz_interp[:, :-1] - vel_dirs = ( - velocities / 
(cp.linalg.norm(velocities, axis=-1, keepdims=True) + 1e-5) - ).reshape((num_tracks, -1)) - - # [num_bases, num_gaussians] - if mode == "kmeans": - model = KMeans(n_clusters=num_bases) - else: - model = HDBSCAN(min_cluster_size=20, max_cluster_size=num_tracks // 4) - model.fit(vel_dirs) - labels = model.labels_ - num_bases = labels.max().item() + 1 - sampled_centers = torch.stack( - [ - means_canonical[torch.tensor(labels == i)].median(dim=0).values - for i in range(num_bases) - ] - )[None] - print("number of {} clusters: ".format(mode), num_bases) - return sampled_centers, num_bases, torch.tensor(labels) - - -def interp_masked(vals: cp.ndarray, mask: cp.ndarray, pad: int = 1) -> cp.ndarray: - """ - hacky way to interpolate batched with cupy - by concatenating the batches and pad with dummy values - :param vals: [B, M, *] - :param mask: [B, M] - """ - assert mask.ndim == 2 - assert vals.shape[:2] == mask.shape - - B, M = mask.shape - - # get the first and last valid values for each track - sh = vals.shape[2:] - vals = vals.reshape((B, M, -1)) - D = vals.shape[-1] - first_val_idcs = cp.argmax(mask, axis=-1) - last_val_idcs = M - 1 - cp.argmax(cp.flip(mask, axis=-1), axis=-1) - bidcs = cp.arange(B) - - v0 = vals[bidcs, first_val_idcs][:, None] - v1 = vals[bidcs, last_val_idcs][:, None] - m0 = mask[bidcs, first_val_idcs][:, None] - m1 = mask[bidcs, last_val_idcs][:, None] - if pad > 1: - v0 = cp.tile(v0, [1, pad, 1]) - v1 = cp.tile(v1, [1, pad, 1]) - m0 = cp.tile(m0, [1, pad]) - m1 = cp.tile(m1, [1, pad]) - - vals_pad = cp.concatenate([v0, vals, v1], axis=1) - mask_pad = cp.concatenate([m0, mask, m1], axis=1) - - M_pad = vals_pad.shape[1] - vals_flat = vals_pad.reshape((B * M_pad, -1)) - mask_flat = mask_pad.reshape((B * M_pad,)) - idcs = cp.where(mask_flat)[0] - - cx = cp.arange(B * M_pad) - out = cp.zeros((B * M_pad, D), dtype=vals_flat.dtype) - for d in range(D): - out[:, d] = cp.interp(cx, idcs, vals_flat[idcs, d]) - - out = out.reshape((B, M_pad, *sh))[:, pad:-pad] - return out - - -def batched_interp_masked( - vals: cp.ndarray, mask: cp.ndarray, batch_num: int = 4096, batch_time: int = 64 -): - assert mask.ndim == 2 - B, M = mask.shape - out = cp.zeros_like(vals) - for b in tqdm(range(0, B, batch_num), leave=False): - for m in tqdm(range(0, M, batch_time), leave=False): - x = interp_masked( - vals[b : b + batch_num, m : m + batch_time], - mask[b : b + batch_num, m : m + batch_time], - ) # (batch_num, batch_time, *) - out[b : b + batch_num, m : m + batch_time] = x - return out diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/loss_utils.py b/som_out/swing/code/2024-10-26-020013/flow3d/loss_utils.py deleted file mode 100644 index 244bb4ff7b0896b87721339275ad1cdd42d7fd1a..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/loss_utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from sklearn.neighbors import NearestNeighbors - - -def masked_mse_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_mse_loss(pred, gt, quantile) - else: - sum_loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return 
torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_l1_loss(pred, gt, mask=None, normalize=True, quantile: float = 1.0): - if mask is None: - return trimmed_l1_loss(pred, gt, quantile) - else: - sum_loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1, keepdim=True) - quantile_mask = ( - (sum_loss < torch.quantile(sum_loss, quantile)).squeeze(-1) - if quantile < 1 - else torch.ones_like(sum_loss, dtype=torch.bool).squeeze(-1) - ) - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum((sum_loss * mask)[quantile_mask]) / ( - ndim * torch.sum(mask[quantile_mask]) + 1e-8 - ) - else: - return torch.mean((sum_loss * mask)[quantile_mask]) - - -def masked_huber_loss(pred, gt, delta, mask=None, normalize=True): - if mask is None: - return F.huber_loss(pred, gt, delta=delta) - else: - sum_loss = F.huber_loss(pred, gt, delta=delta, reduction="none") - ndim = sum_loss.shape[-1] - if normalize: - return torch.sum(sum_loss * mask) / (ndim * torch.sum(mask) + 1e-8) - else: - return torch.mean(sum_loss * mask) - - -def trimmed_mse_loss(pred, gt, quantile=0.9): - loss = F.mse_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def trimmed_l1_loss(pred, gt, quantile=0.9): - loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1) - loss_at_quantile = torch.quantile(loss, quantile) - trimmed_loss = loss[loss < loss_at_quantile].mean() - return trimmed_loss - - -def compute_gradient_loss(pred, gt, mask, quantile=0.98): - """ - Compute gradient loss - pred: (batch_size, H, W, D) or (batch_size, H, W) - gt: (batch_size, H, W, D) or (batch_size, H, W) - mask: (batch_size, H, W), bool or float - """ - # NOTE: messy need to be cleaned up - mask_x = mask[:, :, 1:] * mask[:, :, :-1] - mask_y = mask[:, 1:, :] * mask[:, :-1, :] - pred_grad_x = pred[:, :, 1:] - pred[:, :, :-1] - pred_grad_y = pred[:, 1:, :] - pred[:, :-1, :] - gt_grad_x = gt[:, :, 1:] - gt[:, :, :-1] - gt_grad_y = gt[:, 1:, :] - gt[:, :-1, :] - loss = masked_l1_loss( - pred_grad_x[mask_x][..., None], gt_grad_x[mask_x][..., None], quantile=quantile - ) + masked_l1_loss( - pred_grad_y[mask_y][..., None], gt_grad_y[mask_y][..., None], quantile=quantile - ) - return loss - - -def knn(x: torch.Tensor, k: int) -> tuple[np.ndarray, np.ndarray]: - x = x.cpu().numpy() - knn_model = NearestNeighbors( - n_neighbors=k + 1, algorithm="auto", metric="euclidean" - ).fit(x) - distances, indices = knn_model.kneighbors(x) - return distances[:, 1:].astype(np.float32), indices[:, 1:].astype(np.float32) - - -def get_weights_for_procrustes(clusters, visibilities=None): - clusters_median = clusters.median(dim=-2, keepdim=True)[0] - dists2clusters_center = torch.norm(clusters - clusters_median, dim=-1) - dists2clusters_center /= dists2clusters_center.median(dim=-1, keepdim=True)[0] - weights = torch.exp(-dists2clusters_center) - weights /= weights.mean(dim=-1, keepdim=True) + 1e-6 - if visibilities is not None: - weights *= visibilities.float() + 1e-6 - invalid = dists2clusters_center > np.quantile( - dists2clusters_center.cpu().numpy(), 0.9 - ) - invalid |= torch.isnan(weights) - weights[invalid] = 0 - return weights - - -def compute_z_acc_loss(means_ts_nb: torch.Tensor, w2cs: torch.Tensor): - """ - :param means_ts (G, 3, B, 3) - :param w2cs (B, 4, 4) - return (float) - """ - camera_center_t = torch.linalg.inv(w2cs)[:, :3, 3] # (B, 3) - ray_dir = F.normalize( - means_ts_nb[:, 1] - camera_center_t, p=2.0, dim=-1 - ) # [G, B, 3] - 
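# Illustrative sketch, not from the original flow3d source: the quantile
# trimming used by the masked/trimmed losses above. Per-element losses beyond
# a quantile are simply dropped, so gross outliers (bad tracks, depth speckle)
# cannot dominate the mean.
import torch
import torch.nn.functional as F

pred, gt = torch.rand(100, 3), torch.rand(100, 3)
loss = F.l1_loss(pred, gt, reduction="none").mean(dim=-1)      # (100,)
trimmed = loss[loss < torch.quantile(loss, 0.9)].mean()        # drop worst 10%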
# acc = 2 * means[:, 1] - means[:, 0] - means[:, 2] # [G, B, 3] - # acc_loss = (acc * ray_dir).sum(dim=-1).abs().mean() - acc_loss = ( - ((means_ts_nb[:, 1] - means_ts_nb[:, 0]) * ray_dir).sum(dim=-1) ** 2 - ).mean() + ( - ((means_ts_nb[:, 2] - means_ts_nb[:, 1]) * ray_dir).sum(dim=-1) ** 2 - ).mean() - return acc_loss - - -def compute_se3_smoothness_loss( - rots: torch.Tensor, - transls: torch.Tensor, - weight_rot: float = 1.0, - weight_transl: float = 2.0, -): - """ - central differences - :param motion_transls (K, T, 3) - :param motion_rots (K, T, 6) - """ - r_accel_loss = compute_accel_loss(rots) - t_accel_loss = compute_accel_loss(transls) - return r_accel_loss * weight_rot + t_accel_loss * weight_transl - - -def compute_accel_loss(transls): - accel = 2 * transls[:, 1:-1] - transls[:, :-2] - transls[:, 2:] - loss = accel.norm(dim=-1).mean() - return loss - diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/metrics.py b/som_out/swing/code/2024-10-26-020013/flow3d/metrics.py deleted file mode 100644 index 26efadea6ce950d1c597dee0d67b46714846ead5..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/metrics.py +++ /dev/null @@ -1,313 +0,0 @@ -from typing import Literal - -import numpy as np -import torch -import torch.nn.functional as F -from torchmetrics.functional.image.lpips import _NoTrainLpips -from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure -from torchmetrics.metric import Metric -from torchmetrics.utilities import dim_zero_cat -from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE - - -def compute_psnr( - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, -) -> float: - """ - Args: - preds (torch.Tensor): (..., 3) predicted images in [0, 1]. - targets (torch.Tensor): (..., 3) target images in [0, 1]. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - - Returns: - psnr (float): Peak signal-to-noise ratio. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - return ( - -10.0 - * torch.log( - F.mse_loss( - preds * masks[..., None], - targets * masks[..., None], - reduction="sum", - ) - / masks.sum().clamp(min=1.0) - / 3.0 - ) - / np.log(10.0) - ).item() - - -def compute_pose_errors( - preds: torch.Tensor, targets: torch.Tensor -) -> tuple[float, float, float]: - """ - Args: - preds: (N, 4, 4) predicted camera poses. - targets: (N, 4, 4) target camera poses. - - Returns: - ate (float): Absolute trajectory error. - rpe_t (float): Relative pose error in translation. - rpe_r (float): Relative pose error in rotation (degree). - """ - # Compute ATE. - ate = torch.linalg.norm(preds[:, :3, -1] - targets[:, :3, -1], dim=-1).mean().item() - # Compute RPE_t and RPE_r. - # NOTE(hangg): It's important to use numpy here for the accuracy of RPE_r. - # torch has numerical issues for acos when the value is close to 1.0, i.e. - # RPE_r is supposed to be very small, and will result in artificially large - # error. 
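# Illustrative sketch, not from the original flow3d source: the geodesic
# rotation-error formula used just below. The angle of a relative rotation R
# is acos((trace(R) - 1) / 2); clipping to [-1, 1] guards against float error
# pushing the argument slightly past 1, which is why the NOTE above insists
# on numpy here.
import numpy as np

R = np.eye(3)                                                  # zero rotation error
angle_deg = np.degrees(np.arccos(np.clip((np.trace(R) - 1.0) / 2.0, -1.0, 1.0)))
assert angle_deg == 0.0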
- preds = preds.detach().cpu().numpy() - targets = targets.detach().cpu().numpy() - pred_rels = np.linalg.inv(preds[:-1]) @ preds[1:] - target_rels = np.linalg.inv(targets[:-1]) @ targets[1:] - error_rels = np.linalg.inv(target_rels) @ pred_rels - traces = error_rels[:, :3, :3].trace(axis1=-2, axis2=-1) - rpe_t = np.linalg.norm(error_rels[:, :3, -1], axis=-1).mean().item() - rpe_r = ( - np.arccos(np.clip((traces - 1.0) / 2.0, -1.0, 1.0)).mean().item() - / np.pi - * 180.0 - ) - return ate, rpe_t, rpe_r - - -class mPSNR(PeakSignalNoiseRatio): - sum_squared_error: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__(self, **kwargs) -> None: - super().__init__( - data_range=1.0, - base=10.0, - dim=None, - reduction="elementwise_mean", - **kwargs, - ) - self.add_state("sum_squared_error", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (..., 3) float32 predicted images. - targets (torch.Tensor): (..., 3) float32 target images. - masks (torch.Tensor | None): (...,) optional binary masks where the - 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - self.sum_squared_error.append( - torch.sum(torch.pow((preds - targets) * masks[..., None], 2)) - ) - self.total.append(masks.sum().to(torch.int64) * 3) - - def compute(self) -> torch.Tensor: - """Compute peak signal-to-noise ratio over state.""" - sum_squared_error = dim_zero_cat(self.sum_squared_error) - total = dim_zero_cat(self.total) - return -10.0 * torch.log(sum_squared_error / total).mean() / np.log(10.0) - - -class mSSIM(StructuralSimilarityIndexMeasure): - similarity: list - - def __init__(self, **kwargs) -> None: - super().__init__( - reduction=None, - data_range=1.0, - return_full_image=False, - **kwargs, - ) - assert isinstance(self.sigma, float) - - def __len__(self) -> int: - return sum([s.shape[0] for s in self.similarity]) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update state with predictions and targets. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional binary masks where - the 1-regions will be taken into account. - """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - - # Construct a 1D Gaussian blur filter. - assert isinstance(self.kernel_size, int) - hw = self.kernel_size // 2 - shift = (2 * hw - self.kernel_size + 1) / 2 - assert isinstance(self.sigma, float) - f_i = ( - (torch.arange(self.kernel_size, device=preds.device) - hw + shift) - / self.sigma - ) ** 2 - filt = torch.exp(-0.5 * f_i) - filt /= torch.sum(filt) - - # Blur in x and y (faster than the 2D convolution). - def convolve2d(z, m, f): - # z: (B, H, W, C), m: (B, H, W), f: (Hf, Wf).
- z = z.permute(0, 3, 1, 2) - m = m[:, None] - f = f[None, None].expand(z.shape[1], -1, -1, -1) - z_ = torch.nn.functional.conv2d( - z * m, f, padding="valid", groups=z.shape[1] - ) - m_ = torch.nn.functional.conv2d(m, torch.ones_like(f[:1]), padding="valid") - return torch.where( - m_ != 0, z_ * torch.ones_like(f).sum() / (m_ * z.shape[1]), 0 - ).permute(0, 2, 3, 1), (m_ != 0)[:, 0].to(z.dtype) - - filt_fn1 = lambda z, m: convolve2d(z, m, filt[:, None]) - filt_fn2 = lambda z, m: convolve2d(z, m, filt[None, :]) - filt_fn = lambda z, m: filt_fn1(*filt_fn2(z, m)) - - mu0 = filt_fn(preds, masks)[0] - mu1 = filt_fn(targets, masks)[0] - mu00 = mu0 * mu0 - mu11 = mu1 * mu1 - mu01 = mu0 * mu1 - sigma00 = filt_fn(preds**2, masks)[0] - mu00 - sigma11 = filt_fn(targets**2, masks)[0] - mu11 - sigma01 = filt_fn(preds * targets, masks)[0] - mu01 - - # Clip the variances and covariances to valid values. - # Variance must be non-negative: - sigma00 = sigma00.clamp(min=0.0) - sigma11 = sigma11.clamp(min=0.0) - sigma01 = torch.sign(sigma01) * torch.minimum( - torch.sqrt(sigma00 * sigma11), torch.abs(sigma01) - ) - - assert isinstance(self.data_range, float) - c1 = (self.k1 * self.data_range) ** 2 - c2 = (self.k2 * self.data_range) ** 2 - numer = (2 * mu01 + c1) * (2 * sigma01 + c2) - denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) - ssim_map = numer / denom - - self.similarity.append(ssim_map.mean(dim=(1, 2, 3))) - - def compute(self) -> torch.Tensor: - """Compute final SSIM metric.""" - return torch.cat(self.similarity).mean() - - -class mLPIPS(Metric): - sum_scores: list[torch.Tensor] - total: list[torch.Tensor] - - def __init__( - self, - net_type: Literal["vgg", "alex", "squeeze"] = "alex", - **kwargs, - ): - super().__init__(**kwargs) - - if not _TORCHVISION_AVAILABLE: - raise ModuleNotFoundError( - "LPIPS metric requires that torchvision is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torchvision`." - ) - - valid_net_type = ("vgg", "alex", "squeeze") - if net_type not in valid_net_type: - raise ValueError( - f"Argument `net_type` must be one of {valid_net_type}, but got {net_type}." - ) - self.net = _NoTrainLpips(net=net_type, spatial=True) - - self.add_state("sum_scores", [], dist_reduce_fx="cat") - self.add_state("total", [], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update( - self, - preds: torch.Tensor, - targets: torch.Tensor, - masks: torch.Tensor | None = None, - ): - """Update internal states with lpips scores. - - Args: - preds (torch.Tensor): (B, H, W, 3) float32 predicted images. - targets (torch.Tensor): (B, H, W, 3) float32 target images. - masks (torch.Tensor | None): (B, H, W) optional float32 binary - masks where the 1-regions will be taken into account. 
- """ - if masks is None: - masks = torch.ones_like(preds[..., 0]) - scores = self.net( - (preds * masks[..., None]).permute(0, 3, 1, 2), - (targets * masks[..., None]).permute(0, 3, 1, 2), - normalize=True, - ) - self.sum_scores.append((scores * masks[:, None]).sum()) - self.total.append(masks.sum().to(torch.int64)) - - def compute(self) -> torch.Tensor: - """Compute final perceptual similarity metric.""" - return ( - torch.tensor(self.sum_scores, device=self.device) - / torch.tensor(self.total, device=self.device) - ).mean() - - -class PCK(Metric): - correct: list[torch.Tensor] - total: list[int] - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.add_state("correct", default=[], dist_reduce_fx="cat") - self.add_state("total", default=[], dist_reduce_fx="cat") - - def __len__(self) -> int: - return len(self.total) - - def update(self, preds: torch.Tensor, targets: torch.Tensor, threshold: float): - """Update internal states with PCK scores. - - Args: - preds (torch.Tensor): (N, 2) predicted 2D keypoints. - targets (torch.Tensor): (N, 2) targets 2D keypoints. - threshold (float): PCK threshold. - """ - - self.correct.append( - (torch.linalg.norm(preds - targets, dim=-1) < threshold).sum() - ) - self.total.append(preds.shape[0]) - - def compute(self) -> torch.Tensor: - """Compute PCK over state.""" - return ( - torch.tensor(self.correct, device=self.device) - / torch.clamp(torch.tensor(self.total, device=self.device), min=1e-8) - ).mean() diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/params.py b/som_out/swing/code/2024-10-26-020013/flow3d/params.py deleted file mode 100644 index db6b26fd64d68a142900267ec83e4c5f1ed8604e..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/params.py +++ /dev/null @@ -1,184 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from flow3d.transforms import cont_6d_to_rmat - - -class GaussianParams(nn.Module): - def __init__( - self, - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, - scene_center: torch.Tensor | None = None, - scene_scale: torch.Tensor | float = 1.0, - ): - super().__init__() - if not check_gaussian_sizes( - means, quats, scales, colors, opacities, motion_coefs - ): - import ipdb - - ipdb.set_trace() - params_dict = { - "means": nn.Parameter(means), - "quats": nn.Parameter(quats), - "scales": nn.Parameter(scales), - "colors": nn.Parameter(colors), - "opacities": nn.Parameter(opacities), - } - if motion_coefs is not None: - params_dict["motion_coefs"] = nn.Parameter(motion_coefs) - self.params = nn.ParameterDict(params_dict) - self.quat_activation = lambda x: F.normalize(x, dim=-1, p=2) - self.color_activation = torch.sigmoid - self.scale_activation = torch.exp - self.opacity_activation = torch.sigmoid - self.motion_coef_activation = lambda x: F.softmax(x, dim=-1) - - if scene_center is None: - scene_center = torch.zeros(3, device=means.device) - self.register_buffer("scene_center", scene_center) - self.register_buffer("scene_scale", torch.as_tensor(scene_scale)) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - req_keys = ["means", "quats", "scales", "colors", "opacities"] - assert all(f"{prefix}{k}" in state_dict for k in req_keys) - args = { - "motion_coefs": None, - "scene_center": torch.zeros(3), - "scene_scale": torch.tensor(1.0), - } - for k in req_keys + list(args.keys()): - if 
f"{prefix}{k}" in state_dict: - args[k] = state_dict[f"{prefix}{k}"] - return GaussianParams(**args) - - @property - def num_gaussians(self) -> int: - return self.params["means"].shape[0] - - def get_colors(self) -> torch.Tensor: - return self.color_activation(self.params["colors"]) - - def get_scales(self) -> torch.Tensor: - return self.scale_activation(self.params["scales"]) - - def get_opacities(self) -> torch.Tensor: - return self.opacity_activation(self.params["opacities"]) - - def get_quats(self) -> torch.Tensor: - return self.quat_activation(self.params["quats"]) - - def get_coefs(self) -> torch.Tensor: - assert "motion_coefs" in self.params - return self.motion_coef_activation(self.params["motion_coefs"]) - - def densify_params(self, should_split, should_dup): - """ - densify gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_dup = x[should_dup] - x_split = x[should_split].repeat([2] + [1] * (x.ndim - 1)) - if name == "scales": - x_split -= math.log(1.6) - x_new = nn.Parameter(torch.cat([x[~should_split], x_dup, x_split], dim=0)) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def cull_params(self, should_cull): - """ - cull gaussians - """ - updated_params = {} - for name, x in self.params.items(): - x_new = nn.Parameter(x[~should_cull]) - updated_params[name] = x_new - self.params[name] = x_new - return updated_params - - def reset_opacities(self, new_val): - """ - reset all opacities to new_val - """ - self.params["opacities"].data.fill_(new_val) - updated_params = {"opacities": self.params["opacities"]} - return updated_params - - -class MotionBases(nn.Module): - def __init__(self, rots, transls): - super().__init__() - self.num_frames = rots.shape[1] - self.num_bases = rots.shape[0] - assert check_bases_sizes(rots, transls) - self.params = nn.ParameterDict( - { - "rots": nn.Parameter(rots), - "transls": nn.Parameter(transls), - } - ) - - @staticmethod - def init_from_state_dict(state_dict, prefix="params."): - param_keys = ["rots", "transls"] - assert all(f"{prefix}{k}" in state_dict for k in param_keys) - args = {k: state_dict[f"{prefix}{k}"] for k in param_keys} - return MotionBases(**args) - - def compute_transforms(self, ts: torch.Tensor, coefs: torch.Tensor) -> torch.Tensor: - """ - :param ts (B) - :param coefs (G, K) - returns transforms (G, B, 3, 4) - """ - transls = self.params["transls"][:, ts] # (K, B, 3) - rots = self.params["rots"][:, ts] # (K, B, 6) - transls = torch.einsum("pk,kni->pni", coefs, transls) - rots = torch.einsum("pk,kni->pni", coefs, rots) # (G, B, 6) - rotmats = cont_6d_to_rmat(rots) # (K, B, 3, 3) - return torch.cat([rotmats, transls[..., None]], dim=-1) - - -def check_gaussian_sizes( - means: torch.Tensor, - quats: torch.Tensor, - scales: torch.Tensor, - colors: torch.Tensor, - opacities: torch.Tensor, - motion_coefs: torch.Tensor | None = None, -) -> bool: - dims = means.shape[:-1] - leading_dims_match = ( - quats.shape[:-1] == dims - and scales.shape[:-1] == dims - and colors.shape[:-1] == dims - and opacities.shape == dims - ) - if motion_coefs is not None and motion_coefs.numel() > 0: - leading_dims_match &= motion_coefs.shape[:-1] == dims - dims_correct = ( - means.shape[-1] == 3 - and (quats.shape[-1] == 4) - and (scales.shape[-1] == 3) - and (colors.shape[-1] == 3) - ) - return leading_dims_match and dims_correct - - -def check_bases_sizes(motion_rots: torch.Tensor, motion_transls: torch.Tensor) -> bool: - return ( - motion_rots.shape[-1] == 6 - and motion_transls.shape[-1] 
== 3 - and motion_rots.shape[:-2] == motion_transls.shape[:-2] - ) diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/renderer.py b/som_out/swing/code/2024-10-26-020013/flow3d/renderer.py deleted file mode 100644 index 974a1a630b18a9392a545c44ec4c981277354f1b..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/renderer.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState - -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import draw_tracks_2d_th, get_server -from flow3d.vis.viewer import DynamicViewer - - -class Renderer: - def __init__( - self, - model: SceneModel, - device: torch.device, - # Logging. - work_dir: str, - port: int | None = None, - ): - self.device = device - - self.model = model - self.num_frames = model.num_frames - - self.work_dir = work_dir - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="rendering" - ) - - self.tracks_3d = self.model.compute_poses_fg( - # torch.arange(max(0, t - 20), max(1, t), device=self.device), - torch.arange(self.num_frames, device=self.device), - inds=torch.arange(10, device=self.device), - )[0] - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> "Renderer": - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - renderer = Renderer(model, device, *args, **kwargs) - renderer.global_step = ckpt.get("global_step", 0) - renderer.epoch = ckpt.get("epoch", 0) - return renderer - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - if self.viewer is None: - return np.full((img_wh[1], img_wh[0], 3), 255, dtype=np.uint8) - - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - if not self.viewer._render_track_checkbox.value: - img = (img.cpu().numpy() * 255.0).astype(np.uint8) - else: - assert t is not None - tracks_3d = self.tracks_3d[:, max(0, t - 20) : max(1, t)] - tracks_2d = torch.einsum( - "ij,jk,nbk->nbi", K, w2c[:3], F.pad(tracks_3d, (0, 1), value=1.0) - ) - tracks_2d = tracks_2d[..., :2] / tracks_2d[..., 2:] - img = draw_tracks_2d_th(img, tracks_2d) - return img diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/scene_model.py b/som_out/swing/code/2024-10-26-020013/flow3d/scene_model.py deleted file mode 100644 index 7bd685b691153b62234a8084dffe33b11b83b327..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/scene_model.py +++ /dev/null @@ -1,292 +0,0 @@ -import roma -import torch -import torch.nn as nn -import torch.nn.functional as F -from gsplat.rendering import rasterization -from torch import Tensor - -from flow3d.params import GaussianParams, MotionBases - - -class SceneModel(nn.Module): - def 
__init__( - self, - Ks: Tensor, - w2cs: Tensor, - fg_params: GaussianParams, - motion_bases: MotionBases, - bg_params: GaussianParams | None = None, - ): - super().__init__() - self.num_frames = motion_bases.num_frames - self.fg = fg_params - self.motion_bases = motion_bases - self.bg = bg_params - scene_scale = 1.0 if bg_params is None else bg_params.scene_scale - self.register_buffer("bg_scene_scale", torch.as_tensor(scene_scale)) - self.register_buffer("Ks", Ks) - self.register_buffer("w2cs", w2cs) - - self._current_xys = None - self._current_radii = None - self._current_img_wh = None - - @property - def num_gaussians(self) -> int: - return self.num_bg_gaussians + self.num_fg_gaussians - - @property - def num_bg_gaussians(self) -> int: - return self.bg.num_gaussians if self.bg is not None else 0 - - @property - def num_fg_gaussians(self) -> int: - return self.fg.num_gaussians - - @property - def num_motion_bases(self) -> int: - return self.motion_bases.num_bases - - @property - def has_bg(self) -> bool: - return self.bg is not None - - def compute_poses_bg(self) -> tuple[torch.Tensor, torch.Tensor]: - """ - Returns: - means: (G, B, 3) - quats: (G, B, 4) - """ - assert self.bg is not None - return self.bg.params["means"], self.bg.get_quats() - - def compute_transforms( - self, ts: torch.Tensor, inds: torch.Tensor | None = None - ) -> torch.Tensor: - coefs = self.fg.get_coefs() # (G, K) - if inds is not None: - coefs = coefs[inds] - transfms = self.motion_bases.compute_transforms(ts, coefs) # (G, B, 3, 4) - return transfms - - def compute_poses_fg( - self, ts: torch.Tensor | None, inds: torch.Tensor | None = None - ) -> tuple[torch.Tensor, torch.Tensor]: - """ - :returns means: (G, B, 3), quats: (G, B, 4) - """ - means = self.fg.params["means"] # (G, 3) - quats = self.fg.get_quats() # (G, 4) - if inds is not None: - means = means[inds] - quats = quats[inds] - if ts is not None: - transfms = self.compute_transforms(ts, inds) # (G, B, 3, 4) - means = torch.einsum( - "pnij,pj->pni", - transfms, - F.pad(means, (0, 1), value=1.0), - ) - quats = roma.quat_xyzw_to_wxyz( - ( - roma.quat_product( - roma.rotmat_to_unitquat(transfms[..., :3, :3]), - roma.quat_wxyz_to_xyzw(quats[:, None]), - ) - ) - ) - quats = F.normalize(quats, p=2, dim=-1) - else: - means = means[:, None] - quats = quats[:, None] - return means, quats - - def compute_poses_all( - self, ts: torch.Tensor | None - ) -> tuple[torch.Tensor, torch.Tensor]: - means, quats = self.compute_poses_fg(ts) - if self.has_bg: - bg_means, bg_quats = self.compute_poses_bg() - means = torch.cat( - [means, bg_means[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - quats = torch.cat( - [quats, bg_quats[:, None].expand(-1, means.shape[1], -1)], dim=0 - ).contiguous() - return means, quats - - def get_colors_all(self) -> torch.Tensor: - colors = self.fg.get_colors() - if self.bg is not None: - colors = torch.cat([colors, self.bg.get_colors()], dim=0).contiguous() - return colors - - def get_scales_all(self) -> torch.Tensor: - scales = self.fg.get_scales() - if self.bg is not None: - scales = torch.cat([scales, self.bg.get_scales()], dim=0).contiguous() - return scales - - def get_opacities_all(self) -> torch.Tensor: - """ - :returns colors: (G, 3), scales: (G, 3), opacities: (G, 1) - """ - opacities = self.fg.get_opacities() - if self.bg is not None: - opacities = torch.cat( - [opacities, self.bg.get_opacities()], dim=0 - ).contiguous() - return opacities - - @staticmethod - def init_from_state_dict(state_dict, prefix=""): - fg = 
GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}fg.params." - ) - bg = None - if any("bg." in k for k in state_dict): - bg = GaussianParams.init_from_state_dict( - state_dict, prefix=f"{prefix}bg.params." - ) - motion_bases = MotionBases.init_from_state_dict( - state_dict, prefix=f"{prefix}motion_bases.params." - ) - Ks = state_dict[f"{prefix}Ks"] - w2cs = state_dict[f"{prefix}w2cs"] - return SceneModel(Ks, w2cs, fg, motion_bases, bg) - - def render( - self, - # A single time instance for view rendering. - t: int | None, - w2cs: torch.Tensor, # (C, 4, 4) - Ks: torch.Tensor, # (C, 3, 3) - img_wh: tuple[int, int], - # Multiple time instances for track rendering: (B,). - target_ts: torch.Tensor | None = None, # (B) - target_w2cs: torch.Tensor | None = None, # (B, 4, 4) - bg_color: torch.Tensor | float = 1.0, - colors_override: torch.Tensor | None = None, - means: torch.Tensor | None = None, - quats: torch.Tensor | None = None, - target_means: torch.Tensor | None = None, - return_color: bool = True, - return_depth: bool = False, - return_mask: bool = False, - fg_only: bool = False, - filter_mask: torch.Tensor | None = None, - ) -> dict: - device = w2cs.device - C = w2cs.shape[0] - - W, H = img_wh - pose_fnc = self.compute_poses_fg if fg_only else self.compute_poses_all - N = self.num_fg_gaussians if fg_only else self.num_gaussians - - if means is None or quats is None: - means, quats = pose_fnc( - torch.tensor([t], device=device) if t is not None else None - ) - means = means[:, 0] - quats = quats[:, 0] - - if colors_override is None: - if return_color: - colors_override = ( - self.fg.get_colors() if fg_only else self.get_colors_all() - ) - else: - colors_override = torch.zeros(N, 0, device=device) - - D = colors_override.shape[-1] - - scales = self.fg.get_scales() if fg_only else self.get_scales_all() - opacities = self.fg.get_opacities() if fg_only else self.get_opacities_all() - - if isinstance(bg_color, float): - bg_color = torch.full((C, D), bg_color, device=device) - assert isinstance(bg_color, torch.Tensor) - - mode = "RGB" - ds_expected = {"img": D} - - if return_mask: - if self.has_bg and not fg_only: - mask_values = torch.zeros((self.num_gaussians, 1), device=device) - mask_values[: self.num_fg_gaussians] = 1.0 - else: - mask_values = torch.ones((self.num_fg_gaussians, 1), device=device) - colors_override = torch.cat([colors_override, mask_values], dim=-1) - bg_color = torch.cat([bg_color, torch.zeros(C, 1, device=device)], dim=-1) - ds_expected["mask"] = 1 - - B = 0 - if target_ts is not None: - B = target_ts.shape[0] - if target_means is None: - target_means, _ = pose_fnc(target_ts) # [G, B, 3] - if target_w2cs is not None: - target_means = torch.einsum( - "bij,pbj->pbi", - target_w2cs[:, :3], - F.pad(target_means, (0, 1), value=1.0), - ) - track_3d_vals = target_means.flatten(-2) # (G, B * 3) - d_track = track_3d_vals.shape[-1] - colors_override = torch.cat([colors_override, track_3d_vals], dim=-1) - bg_color = torch.cat( - [bg_color, torch.zeros(C, track_3d_vals.shape[-1], device=device)], - dim=-1, - ) - ds_expected["tracks_3d"] = d_track - - assert colors_override.shape[-1] == sum(ds_expected.values()) - assert bg_color.shape[-1] == sum(ds_expected.values()) - - if return_depth: - mode = "RGB+ED" - ds_expected["depth"] = 1 - - if filter_mask is not None: - assert filter_mask.shape == (N,) - means = means[filter_mask] - quats = quats[filter_mask] - scales = scales[filter_mask] - opacities = opacities[filter_mask] - colors_override = 
colors_override[filter_mask] - - render_colors, alphas, info = rasterization( - means=means, - quats=quats, - scales=scales, - opacities=opacities, - colors=colors_override, - backgrounds=bg_color, - viewmats=w2cs, # [C, 4, 4] - Ks=Ks, # [C, 3, 3] - width=W, - height=H, - packed=False, - render_mode=mode, - ) - - # Populate the current data for adaptive gaussian control. - if self.training and info["means2d"].requires_grad: - self._current_xys = info["means2d"] - self._current_radii = info["radii"] - self._current_img_wh = img_wh - # We want to be able to access to xys' gradients later in a - # torch.no_grad context. - self._current_xys.retain_grad() - - assert render_colors.shape[-1] == sum(ds_expected.values()) - outputs = torch.split(render_colors, list(ds_expected.values()), dim=-1) - out_dict = {} - for i, (name, dim) in enumerate(ds_expected.items()): - x = outputs[i] - assert x.shape[-1] == dim, f"{x.shape[-1]=} != {dim=}" - if name == "tracks_3d": - x = x.reshape(C, H, W, B, 3) - out_dict[name] = x - out_dict["acc"] = alphas - return out_dict diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/tensor_dataclass.py b/som_out/swing/code/2024-10-26-020013/flow3d/tensor_dataclass.py deleted file mode 100644 index 1a55edaad8d96f0e535c923f47fdb8d310073c52..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/tensor_dataclass.py +++ /dev/null @@ -1,96 +0,0 @@ -from dataclasses import dataclass -from typing import Callable, TypeVar - -import torch -from typing_extensions import Self - -TensorDataclassT = TypeVar("T", bound="TensorDataclass") - - -class TensorDataclass: - """A lighter version of nerfstudio's TensorDataclass: - https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/utils/tensor_dataclass.py - """ - - def __getitem__(self, key) -> Self: - return self.map(lambda x: x[key]) - - def to(self, device: torch.device | str) -> Self: - """Move the tensors in the dataclass to the given device. - - Args: - device: The device to move to. - - Returns: - A new dataclass. - """ - return self.map(lambda x: x.to(device)) - - def map(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Self: - """Apply a function to all tensors in the dataclass. - - Also recurses into lists, tuples, and dictionaries. - - Args: - fn: The function to apply to each tensor. - - Returns: - A new dataclass. - """ - - MapT = TypeVar("MapT") - - def _map_impl( - fn: Callable[[torch.Tensor], torch.Tensor], - val: MapT, - ) -> MapT: - if isinstance(val, torch.Tensor): - return fn(val) - elif isinstance(val, TensorDataclass): - return type(val)(**_map_impl(fn, vars(val))) - elif isinstance(val, (list, tuple)): - return type(val)(_map_impl(fn, v) for v in val) - elif isinstance(val, dict): - assert type(val) is dict # No subclass support. 
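                # NOTE (added commentary, not part of the original snapshot):
                # plain dicts are rebuilt here so that arbitrarily nested
                # containers of tensors are mapped recursively; non-tensor
                # leaves fall through unchanged in the else branch below.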
- return {k: _map_impl(fn, v) for k, v in val.items()} # type: ignore - else: - return val - - return _map_impl(fn, self) - - -@dataclass -class TrackObservations(TensorDataclass): - xyz: torch.Tensor - visibles: torch.Tensor - invisibles: torch.Tensor - confidences: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape[:-1] - return ( - self.visibles.shape == dims - and self.invisibles.shape == dims - and self.confidences.shape == dims - and self.colors.shape[:-1] == dims[:-1] - and self.xyz.shape[-1] == 3 - and self.colors.shape[-1] == 3 - ) - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) - - -@dataclass -class StaticObservations(TensorDataclass): - xyz: torch.Tensor - normals: torch.Tensor - colors: torch.Tensor - - def check_sizes(self) -> bool: - dims = self.xyz.shape - return self.normals.shape == dims and self.colors.shape == dims - - def filter_valid(self, valid_mask: torch.Tensor) -> Self: - return self.map(lambda x: x[valid_mask]) diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/trainer.py b/som_out/swing/code/2024-10-26-020013/flow3d/trainer.py deleted file mode 100644 index 6fa0677f91de70a5a3678cba970600e4ae5b1def..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/trainer.py +++ /dev/null @@ -1,805 +0,0 @@ -import functools -import time -from dataclasses import asdict -from typing import cast - -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState -from pytorch_msssim import SSIM -from torch.utils.tensorboard import SummaryWriter # type: ignore - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.loss_utils import ( - compute_gradient_loss, - compute_se3_smoothness_loss, - compute_z_acc_loss, - masked_l1_loss, -) -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import get_server -from flow3d.vis.viewer import DynamicViewer - - -class Trainer: - def __init__( - self, - model: SceneModel, - device: torch.device, - lr_cfg: SceneLRConfig, - losses_cfg: LossesConfig, - optim_cfg: OptimizerConfig, - # Logging. 
- work_dir: str, - port: int | None = None, - log_every: int = 10, - checkpoint_every: int = 200, - validate_every: int = 500, - validate_video_every: int = 1000, - validate_viewer_assets_every: int = 100, - ): - - self.device = device - self.log_every = log_every - self.checkpoint_every = checkpoint_every - self.validate_every = validate_every - self.validate_video_every = validate_video_every - self.validate_viewer_assets_every = validate_viewer_assets_every - - self.model = model - self.num_frames = model.num_frames - - self.lr_cfg = lr_cfg - self.losses_cfg = losses_cfg - self.optim_cfg = optim_cfg - - self.reset_opacity_every = ( - self.optim_cfg.reset_opacity_every_n_controls * self.optim_cfg.control_every - ) - self.optimizers, self.scheduler = self.configure_optimizers() - - # running stats for adaptive density control - self.running_stats = { - "xys_grad_norm_acc": torch.zeros(self.model.num_gaussians, device=device), - "vis_count": torch.zeros( - self.model.num_gaussians, device=device, dtype=torch.int64 - ), - "max_radii": torch.zeros(self.model.num_gaussians, device=device), - } - - self.work_dir = work_dir - self.writer = SummaryWriter(log_dir=work_dir) - self.global_step = 0 - self.epoch = 0 - - self.viewer = None - if port is not None: - server = get_server(port=port) - self.viewer = DynamicViewer( - server, self.render_fn, model.num_frames, work_dir, mode="training" - ) - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS() - self.pck_metric = PCK() - self.bg_psnr_metric = mPSNR() - self.fg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.fg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS() - self.fg_lpips_metric = mLPIPS() - - def set_epoch(self, epoch: int): - self.epoch = epoch - - def save_checkpoint(self, path: str): - model_dict = self.model.state_dict() - optimizer_dict = {k: v.state_dict() for k, v in self.optimizers.items()} - scheduler_dict = {k: v.state_dict() for k, v in self.scheduler.items()} - ckpt = { - "model": model_dict, - "optimizers": optimizer_dict, - "schedulers": scheduler_dict, - "global_step": self.global_step, - "epoch": self.epoch, - } - torch.save(ckpt, path) - guru.info(f"Saved checkpoint at {self.global_step=} to {path}") - - @staticmethod - def init_from_checkpoint( - path: str, device: torch.device, *args, **kwargs - ) -> tuple["Trainer", int]: - guru.info(f"Loading checkpoint from {path}") - ckpt = torch.load(path) - state_dict = ckpt["model"] - model = SceneModel.init_from_state_dict(state_dict) - model = model.to(device) - trainer = Trainer(model, device, *args, **kwargs) - if "optimizers" in ckpt: - trainer.load_checkpoint_optimizers(ckpt["optimizers"]) - if "schedulers" in ckpt: - trainer.load_checkpoint_schedulers(ckpt["schedulers"]) - trainer.global_step = ckpt.get("global_step", 0) - start_epoch = ckpt.get("epoch", 0) - trainer.set_epoch(start_epoch) - return trainer, start_epoch - - def load_checkpoint_optimizers(self, opt_ckpt): - for k, v in self.optimizers.items(): - v.load_state_dict(opt_ckpt[k]) - - def load_checkpoint_schedulers(self, sched_ckpt): - for k, v in self.scheduler.items(): - v.load_state_dict(sched_ckpt[k]) - - @torch.inference_mode() - def render_fn(self, camera_state: CameraState, img_wh: tuple[int, int]): - W, H = img_wh - - focal = 0.5 * H / np.tan(0.5 * camera_state.fov).item() - K = torch.tensor( - [[focal, 0.0, W / 2.0], [0.0, focal, H / 2.0], [0.0, 0.0, 1.0]], - 
device=self.device, - ) - w2c = torch.linalg.inv( - torch.from_numpy(camera_state.c2w.astype(np.float32)).to(self.device) - ) - t = 0 - if self.viewer is not None: - t = ( - int(self.viewer._playback_guis[0].value) - if not self.viewer._canonical_checkbox.value - else None - ) - self.model.training = False - img = self.model.render(t, w2c[None], K[None], img_wh)["img"][0] - return (img.cpu().numpy() * 255.0).astype(np.uint8) - - def train_step(self, batch): - if self.viewer is not None: - while self.viewer.state.status == "paused": - time.sleep(0.1) - self.viewer.lock.acquire() - - loss, stats, num_rays_per_step, num_rays_per_sec = self.compute_losses(batch) - if loss.isnan(): - guru.info(f"Loss is NaN at step {self.global_step}!!") - import ipdb - - ipdb.set_trace() - loss.backward() - - for opt in self.optimizers.values(): - opt.step() - opt.zero_grad(set_to_none=True) - for sched in self.scheduler.values(): - sched.step() - - self.log_dict(stats) - self.global_step += 1 - self.run_control_steps() - - if self.viewer is not None: - self.viewer.lock.release() - self.viewer.state.num_train_rays_per_sec = num_rays_per_sec - if self.viewer.mode == "training": - self.viewer.update(self.global_step, num_rays_per_step) - - if self.global_step % self.checkpoint_every == 0: - self.save_checkpoint(f"{self.work_dir}/checkpoints/last.ckpt") - - return loss.item() - - def compute_losses(self, batch): - self.model.training = True - B = batch["imgs"].shape[0] - W, H = img_wh = batch["imgs"].shape[2:0:-1] - N = batch["target_ts"][0].shape[0] - - # (B,). - ts = batch["ts"] - # (B, 4, 4). - w2cs = batch["w2cs"] - # (B, 3, 3). - Ks = batch["Ks"] - # (B, H, W, 3). - imgs = batch["imgs"] - # (B, H, W). - valid_masks = batch.get("valid_masks", torch.ones_like(batch["imgs"][..., 0])) - # (B, H, W). - masks = batch["masks"] - masks *= valid_masks - # (B, H, W). - depths = batch["depths"] - # [(P, 2), ...]. - query_tracks_2d = batch["query_tracks_2d"] - # [(N,), ...]. - target_ts = batch["target_ts"] - # [(N, 4, 4), ...]. - target_w2cs = batch["target_w2cs"] - # [(N, 3, 3), ...]. - target_Ks = batch["target_Ks"] - # [(N, P, 2), ...]. - target_tracks_2d = batch["target_tracks_2d"] - # [(N, P), ...]. - target_visibles = batch["target_visibles"] - # [(N, P), ...]. - target_invisibles = batch["target_invisibles"] - # [(N, P), ...]. - target_confidences = batch["target_confidences"] - # [(N, P), ...]. - target_track_depths = batch["target_track_depths"] - - _tic = time.time() - # (B, G, 3). - means, quats = self.model.compute_poses_all(ts) # (G, B, 3), (G, B, 4) - device = means.device - means = means.transpose(0, 1) - quats = quats.transpose(0, 1) - # [(N, G, 3), ...]. - target_ts_vec = torch.cat(target_ts) - # (B * N, G, 3). 
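        # NOTE (added commentary, not part of the original snapshot): all
        # target timesteps are concatenated so the motion bases are queried in
        # one batched call; the result is split back into per-sample chunks of
        # N right after, avoiding a Python loop over B * N pose evaluations.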
- target_means, _ = self.model.compute_poses_all(target_ts_vec) - target_means = target_means.transpose(0, 1) - target_mean_list = target_means.split(N) - num_frames = self.model.num_frames - - loss = 0.0 - - bg_colors = [] - rendered_all = [] - self._batched_xys = [] - self._batched_radii = [] - self._batched_img_wh = [] - for i in range(B): - bg_color = torch.ones(1, 3, device=device) - rendered = self.model.render( - ts[i].item(), - w2cs[None, i], - Ks[None, i], - img_wh, - target_ts=target_ts[i], - target_w2cs=target_w2cs[i], - bg_color=bg_color, - means=means[i], - quats=quats[i], - target_means=target_mean_list[i].transpose(0, 1), - return_depth=True, - return_mask=self.model.has_bg, - ) - rendered_all.append(rendered) - bg_colors.append(bg_color) - if ( - self.model._current_xys is not None - and self.model._current_radii is not None - and self.model._current_img_wh is not None - ): - self._batched_xys.append(self.model._current_xys) - self._batched_radii.append(self.model._current_radii) - self._batched_img_wh.append(self.model._current_img_wh) - - # Necessary to make viewer work. - num_rays_per_step = H * W * B - num_rays_per_sec = num_rays_per_step / (time.time() - _tic) - - # (B, H, W, N, *). - rendered_all = { - key: ( - torch.cat([out_dict[key] for out_dict in rendered_all], dim=0) - if rendered_all[0][key] is not None - else None - ) - for key in rendered_all[0] - } - bg_colors = torch.cat(bg_colors, dim=0) - - # Compute losses. - # (B * N). - frame_intervals = (ts.repeat_interleave(N) - target_ts_vec).abs() - if not self.model.has_bg: - imgs = ( - imgs * masks[..., None] - + (1.0 - masks[..., None]) * bg_colors[:, None, None] - ) - else: - imgs = ( - imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - # (P_all, 2). - tracks_2d = torch.cat([x.reshape(-1, 2) for x in target_tracks_2d], dim=0) - # (P_all,) - visibles = torch.cat([x.reshape(-1) for x in target_visibles], dim=0) - # (P_all,) - confidences = torch.cat([x.reshape(-1) for x in target_confidences], dim=0) - - # RGB loss. - rendered_imgs = cast(torch.Tensor, rendered_all["img"]) - if self.model.has_bg: - rendered_imgs = ( - rendered_imgs * valid_masks[..., None] - + (1.0 - valid_masks[..., None]) * bg_colors[:, None, None] - ) - rgb_loss = 0.8 * F.l1_loss(rendered_imgs, imgs) + 0.2 * ( - 1 - self.ssim(rendered_imgs.permute(0, 3, 1, 2), imgs.permute(0, 3, 1, 2)) - ) - loss += rgb_loss * self.losses_cfg.w_rgb - - # Mask loss. - if not self.model.has_bg: - mask_loss = F.mse_loss(rendered_all["acc"], masks[..., None]) # type: ignore - else: - mask_loss = F.mse_loss( - rendered_all["acc"], torch.ones_like(rendered_all["acc"]) # type: ignore - ) + masked_l1_loss( - rendered_all["mask"], - masks[..., None], - quantile=0.98, # type: ignore - ) - loss += mask_loss * self.losses_cfg.w_mask - - # (B * N, H * W, 3). - pred_tracks_3d = ( - rendered_all["tracks_3d"].permute(0, 3, 1, 2, 4).reshape(-1, H * W, 3) # type: ignore - ) - pred_tracks_2d = torch.einsum( - "bij,bpj->bpi", torch.cat(target_Ks), pred_tracks_3d - ) - # (B * N, H * W, 1). - mapped_depth = torch.clamp(pred_tracks_2d[..., 2:], min=1e-6) - # (B * N, H * W, 2). - pred_tracks_2d = pred_tracks_2d[..., :2] / mapped_depth - - # (B * N). - w_interval = torch.exp(-2 * frame_intervals / num_frames) - # w_track_loss = min(1, (self.max_steps - self.global_step) / 6000) - track_weights = confidences[..., None] * w_interval - - # (B, H, W). 
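        # NOTE (added commentary, not part of the original snapshot): the 2D
        # track loss is only evaluated at pixels where query tracks were
        # sampled. Because the queries lie exactly on the pixel grid, the mask
        # can be scattered directly from their integer coordinates instead of
        # doing any interpolation.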
- masks_flatten = torch.zeros_like(masks) - for i in range(B): - # This takes advantage of the fact that the query 2D tracks are - # always on the grid. - query_pixels = query_tracks_2d[i].to(torch.int64) - masks_flatten[i, query_pixels[:, 1], query_pixels[:, 0]] = 1.0 - # (B * N, H * W). - masks_flatten = ( - masks_flatten.reshape(-1, H * W).tile(1, N).reshape(-1, H * W) > 0.5 - ) - - track_2d_loss = masked_l1_loss( - pred_tracks_2d[masks_flatten][visibles], - tracks_2d[visibles], - mask=track_weights[visibles], - quantile=0.98, - ) / max(H, W) - loss += track_2d_loss * self.losses_cfg.w_track - - depth_masks = ( - masks[..., None] if not self.model.has_bg else valid_masks[..., None] - ) - - pred_depth = cast(torch.Tensor, rendered_all["depth"]) - pred_disp = 1.0 / (pred_depth + 1e-5) - tgt_disp = 1.0 / (depths[..., None] + 1e-5) - depth_loss = masked_l1_loss( - pred_disp, - tgt_disp, - mask=depth_masks, - quantile=0.98, - ) - # depth_loss = cauchy_loss_with_uncertainty( - # pred_disp.squeeze(-1), - # tgt_disp.squeeze(-1), - # depth_masks.squeeze(-1), - # self.depth_uncertainty_activation(self.depth_uncertainties)[ts], - # bias=1e-3, - # ) - loss += depth_loss * self.losses_cfg.w_depth_reg - - # mapped depth loss (using cached depth with EMA) - # mapped_depth_loss = 0.0 - mapped_depth_gt = torch.cat([x.reshape(-1) for x in target_track_depths], dim=0) - mapped_depth_loss = masked_l1_loss( - 1 / (mapped_depth[masks_flatten][visibles] + 1e-5), - 1 / (mapped_depth_gt[visibles, None] + 1e-5), - track_weights[visibles], - ) - - loss += mapped_depth_loss * self.losses_cfg.w_depth_const - - # depth_gradient_loss = 0.0 - depth_gradient_loss = compute_gradient_loss( - pred_disp, - tgt_disp, - mask=depth_masks > 0.5, - quantile=0.95, - ) - # depth_gradient_loss = compute_gradient_loss( - # pred_disps, - # ref_disps, - # mask=depth_masks.squeeze(-1) > 0.5, - # c=depth_uncertainty.detach(), - # mode="l1", - # bias=1e-3, - # ) - loss += depth_gradient_loss * self.losses_cfg.w_depth_grad - - # bases should be smooth. - small_accel_loss = compute_se3_smoothness_loss( - self.model.motion_bases.params["rots"], - self.model.motion_bases.params["transls"], - ) - loss += small_accel_loss * self.losses_cfg.w_smooth_bases - - # tracks should be smooth - ts = torch.clamp(ts, min=1, max=num_frames - 2) - ts_neighbors = torch.cat((ts - 1, ts, ts + 1)) - transfms_nbs = self.model.compute_transforms(ts_neighbors) # (G, 3n, 3, 4) - means_fg_nbs = torch.einsum( - "pnij,pj->pni", - transfms_nbs, - F.pad(self.model.fg.params["means"], (0, 1), value=1.0), - ) - means_fg_nbs = means_fg_nbs.reshape( - means_fg_nbs.shape[0], 3, -1, 3 - ) # [G, 3, n, 3] - if self.losses_cfg.w_smooth_tracks > 0: - small_accel_loss_tracks = 0.5 * ( - (2 * means_fg_nbs[:, 1:-1] - means_fg_nbs[:, :-2] - means_fg_nbs[:, 2:]) - .norm(dim=-1) - .mean() - ) - loss += small_accel_loss_tracks * self.losses_cfg.w_smooth_tracks - - # Constrain the std of scales. - # TODO: do we want to penalize before or after exp? - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.fg.params["scales"], dim=-1).mean() - ) - if self.model.bg is not None: - loss += ( - self.losses_cfg.w_scale_var - * torch.var(self.model.bg.params["scales"], dim=-1).mean() - ) - - # # sparsity loss - # loss += 0.01 * self.opacity_activation(self.opacities).abs().mean() - - # Acceleration along ray direction should be small. - z_accel_loss = compute_z_acc_loss(means_fg_nbs, w2cs) - loss += self.losses_cfg.w_z_accel * z_accel_loss - - # Prepare stats for logging. 
- stats = { - "train/loss": loss.item(), - "train/rgb_loss": rgb_loss.item(), - "train/mask_loss": mask_loss.item(), - "train/depth_loss": depth_loss.item(), - "train/depth_gradient_loss": depth_gradient_loss.item(), - "train/mapped_depth_loss": mapped_depth_loss.item(), - "train/track_2d_loss": track_2d_loss.item(), - "train/small_accel_loss": small_accel_loss.item(), - "train/z_acc_loss": z_accel_loss.item(), - "train/num_gaussians": self.model.num_gaussians, - "train/num_fg_gaussians": self.model.num_fg_gaussians, - "train/num_bg_gaussians": self.model.num_bg_gaussians, - } - - # Compute metrics. - with torch.no_grad(): - psnr = self.psnr_metric( - rendered_imgs, imgs, masks if not self.model.has_bg else valid_masks - ) - self.psnr_metric.reset() - stats["train/psnr"] = psnr - if self.model.has_bg: - bg_psnr = self.bg_psnr_metric(rendered_imgs, imgs, 1.0 - masks) - fg_psnr = self.fg_psnr_metric(rendered_imgs, imgs, masks) - self.bg_psnr_metric.reset() - self.fg_psnr_metric.reset() - stats["train/bg_psnr"] = bg_psnr - stats["train/fg_psnr"] = fg_psnr - - stats.update( - **{ - "train/num_rays_per_sec": num_rays_per_sec, - "train/num_rays_per_step": float(num_rays_per_step), - } - ) - - return loss, stats, num_rays_per_step, num_rays_per_sec - - def log_dict(self, stats: dict): - for k, v in stats.items(): - self.writer.add_scalar(k, v, self.global_step) - - def run_control_steps(self): - global_step = self.global_step - # Adaptive gaussian control. - cfg = self.optim_cfg - num_frames = self.model.num_frames - ready = self._prepare_control_step() - if ( - ready - and global_step > cfg.warmup_steps - and global_step % cfg.control_every == 0 - and global_step < cfg.stop_control_steps - ): - if ( - global_step < cfg.stop_densify_steps - and global_step % self.reset_opacity_every > num_frames - ): - self._densify_control_step(global_step) - if global_step % self.reset_opacity_every > min(3 * num_frames, 1000): - self._cull_control_step(global_step) - if global_step % self.reset_opacity_every == 0: - self._reset_opacity_control_step() - - # Reset stats after every control. - for k in self.running_stats: - self.running_stats[k].zero_() - - @torch.no_grad() - def _prepare_control_step(self) -> bool: - # Prepare for adaptive gaussian control based on the current stats. 
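        # NOTE (added commentary, not part of the original snapshot): every
        # rendered view adds its visible gaussians' screen-space
        # positional-gradient norms and maximum radii to these running
        # accumulators; the gradients are first rescaled into normalized
        # screen units so the densify/cull thresholds are resolution
        # independent.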
- if not ( - self.model._current_radii is not None - and self.model._current_xys is not None - ): - guru.warning("Model not training, skipping control step preparation") - return False - - batch_size = len(self._batched_xys) - # these quantities are for each rendered view and have shapes (C, G, *) - # must be aggregated over all views - for _current_xys, _current_radii, _current_img_wh in zip( - self._batched_xys, self._batched_radii, self._batched_img_wh - ): - sel = _current_radii > 0 - gidcs = torch.where(sel)[1] - # normalize grads to [-1, 1] screen space - xys_grad = _current_xys.grad.clone() - xys_grad[..., 0] *= _current_img_wh[0] / 2.0 * batch_size - xys_grad[..., 1] *= _current_img_wh[1] / 2.0 * batch_size - self.running_stats["xys_grad_norm_acc"].index_add_( - 0, gidcs, xys_grad[sel].norm(dim=-1) - ) - self.running_stats["vis_count"].index_add_( - 0, gidcs, torch.ones_like(gidcs, dtype=torch.int64) - ) - max_radii = torch.maximum( - self.running_stats["max_radii"].index_select(0, gidcs), - _current_radii[sel] / max(_current_img_wh), - ) - self.running_stats["max_radii"].index_put((gidcs,), max_radii) - return True - - @torch.no_grad() - def _densify_control_step(self, global_step): - assert (self.running_stats["vis_count"] > 0).any() - - cfg = self.optim_cfg - xys_grad_avg = self.running_stats["xys_grad_norm_acc"] / self.running_stats[ - "vis_count" - ].clamp_min(1) - is_grad_too_high = xys_grad_avg > cfg.densify_xys_grad_threshold - # Split gaussians. - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cfg.densify_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.densify_screen_threshold - ) - else: - is_radius_too_big = torch.zeros_like(is_grad_too_high, dtype=torch.bool) - - should_split = is_grad_too_high & (is_scale_too_big | is_radius_too_big) - should_dup = is_grad_too_high & ~is_scale_too_big - - num_fg = self.model.num_fg_gaussians - should_fg_split = should_split[:num_fg] - num_fg_splits = int(should_fg_split.sum().item()) - should_fg_dup = should_dup[:num_fg] - num_fg_dups = int(should_fg_dup.sum().item()) - - should_bg_split = should_split[num_fg:] - num_bg_splits = int(should_bg_split.sum().item()) - should_bg_dup = should_dup[num_fg:] - num_bg_dups = int(should_bg_dup.sum().item()) - - fg_param_map = self.model.fg.densify_params(should_fg_split, should_fg_dup) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_fg_split, - num_fg_splits * 2 + num_fg_dups, - ) - - if self.model.bg is not None: - bg_param_map = self.model.bg.densify_params(should_bg_split, should_bg_dup) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - dup_in_optim( - optimizer, - [new_params], - should_bg_split, - num_bg_splits * 2 + num_bg_dups, - ) - - # update running stats - for k, v in self.running_stats.items(): - v_fg, v_bg = v[:num_fg], v[num_fg:] - new_v = torch.cat( - [ - v_fg[~should_fg_split], - v_fg[should_fg_dup], - v_fg[should_fg_split].repeat(2), - v_bg[~should_bg_split], - v_bg[should_bg_dup], - v_bg[should_bg_split].repeat(2), - ], - dim=0, - ) - self.running_stats[k] = new_v - guru.info( - f"Split {should_split.sum().item()} gaussians, " - f"Duplicated {should_dup.sum().item()} gaussians, " - f"{self.model.num_gaussians} 
gaussians left" - ) - - @torch.no_grad() - def _cull_control_step(self, global_step): - # Cull gaussians. - cfg = self.optim_cfg - opacities = self.model.get_opacities_all() - device = opacities.device - is_opacity_too_small = opacities < cfg.cull_opacity_threshold - is_radius_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - is_scale_too_big = torch.zeros_like(is_opacity_too_small, dtype=torch.bool) - cull_scale_threshold = ( - torch.ones(len(is_scale_too_big), device=device) * cfg.cull_scale_threshold - ) - num_fg = self.model.num_fg_gaussians - cull_scale_threshold[num_fg:] *= self.model.bg_scene_scale - if global_step > self.reset_opacity_every: - scales = self.model.get_scales_all() - is_scale_too_big = scales.amax(dim=-1) > cull_scale_threshold - if global_step < cfg.stop_control_by_screen_steps: - is_radius_too_big = ( - self.running_stats["max_radii"] > cfg.cull_screen_threshold - ) - should_cull = is_opacity_too_small | is_radius_too_big | is_scale_too_big - should_fg_cull = should_cull[:num_fg] - should_bg_cull = should_cull[num_fg:] - - fg_param_map = self.model.fg.cull_params(should_fg_cull) - for param_name, new_params in fg_param_map.items(): - full_param_name = f"fg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_fg_cull) - - if self.model.bg is not None: - bg_param_map = self.model.bg.cull_params(should_bg_cull) - for param_name, new_params in bg_param_map.items(): - full_param_name = f"bg.params.{param_name}" - optimizer = self.optimizers[full_param_name] - remove_from_optim(optimizer, [new_params], should_bg_cull) - - # update running stats - for k, v in self.running_stats.items(): - self.running_stats[k] = v[~should_cull] - - guru.info( - f"Culled {should_cull.sum().item()} gaussians, " - f"{self.model.num_gaussians} gaussians left" - ) - - @torch.no_grad() - def _reset_opacity_control_step(self): - # Reset gaussian opacities. - new_val = torch.logit(torch.tensor(0.8 * self.optim_cfg.cull_opacity_threshold)) - for part in ["fg", "bg"]: - part_params = getattr(self.model, part).reset_opacities(new_val) - # Modify optimizer states by new assignment. - for param_name, new_params in part_params.items(): - full_param_name = f"{part}.params.{param_name}" - optimizer = self.optimizers[full_param_name] - reset_in_optim(optimizer, [new_params]) - guru.info("Reset opacities") - - def configure_optimizers(self): - def _exponential_decay(step, *, lr_init, lr_final): - t = np.clip(step / self.optim_cfg.max_steps, 0.0, 1.0) - lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) - return lr / lr_init - - lr_dict = asdict(self.lr_cfg) - optimizers = {} - schedulers = {} - # named parameters will be [part].params.[field] - # e.g. 
fg.params.means - # lr config is a nested dict for each fg/bg part - for name, params in self.model.named_parameters(): - part, _, field = name.split(".") - lr = lr_dict[part][field] - optim = torch.optim.Adam([{"params": params, "lr": lr, "name": name}]) - - if "scales" in name: - fnc = functools.partial(_exponential_decay, lr_final=0.1 * lr) - else: - fnc = lambda _, **__: 1.0 - - optimizers[name] = optim - schedulers[name] = torch.optim.lr_scheduler.LambdaLR( - optim, functools.partial(fnc, lr_init=lr) - ) - return optimizers, schedulers - - -def dup_in_optim(optimizer, new_params: list, should_dup: torch.Tensor, num_dups: int): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - p = param_state[key] - param_state[key] = torch.cat( - [p[~should_dup], p.new_zeros(num_dups, *p.shape[1:])], - dim=0, - ) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def remove_from_optim(optimizer, new_params: list, _should_cull: torch.Tensor): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - if key == "step": - continue - param_state[key] = param_state[key][~_should_cull] - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() - - -def reset_in_optim(optimizer, new_params: list): - assert len(optimizer.param_groups) == len(new_params) - for i, p_new in enumerate(new_params): - old_params = optimizer.param_groups[i]["params"][0] - param_state = optimizer.state[old_params] - if len(param_state) == 0: - return - for key in param_state: - param_state[key] = torch.zeros_like(param_state[key]) - del optimizer.state[old_params] - optimizer.state[p_new] = param_state - optimizer.param_groups[i]["params"] = [p_new] - del old_params - torch.cuda.empty_cache() diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/trajectories.py b/som_out/swing/code/2024-10-26-020013/flow3d/trajectories.py deleted file mode 100644 index 98eb9350fd368c5d6e1a1621fa068d79fc5fc5c7..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/trajectories.py +++ /dev/null @@ -1,200 +0,0 @@ -import numpy as np -import roma -import torch -import torch.nn.functional as F - -from .transforms import rt_to_mat4 - - -def get_avg_w2c(w2cs: torch.Tensor): - c2ws = torch.linalg.inv(w2cs) - # 1. Compute the center - center = c2ws[:, :3, -1].mean(0) - # 2. Compute the z axis - z = F.normalize(c2ws[:, :3, 2].mean(0), dim=-1) - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = c2ws[:, :3, 1].mean(0) # (3) - # 4. Compute the x axis - x = F.normalize(torch.cross(y_, z, dim=-1), dim=-1) # (3) - # 5. 
Compute the y axis (as z and x are normalized, y is already of norm 1) - y = torch.cross(z, x, dim=-1) # (3) - avg_c2w = rt_to_mat4(torch.stack([x, y, z], 1), center) - avg_w2c = torch.linalg.inv(avg_c2w) - return avg_w2c - - -def get_lookat(origins: torch.Tensor, viewdirs: torch.Tensor) -> torch.Tensor: - """Triangulate a set of rays to find a single lookat point. - - Args: - origins (torch.Tensor): A (N, 3) array of ray origins. - viewdirs (torch.Tensor): A (N, 3) array of ray view directions. - - Returns: - torch.Tensor: A (3,) lookat point. - """ - - viewdirs = torch.nn.functional.normalize(viewdirs, dim=-1) - eye = torch.eye(3, device=origins.device, dtype=origins.dtype)[None] - # Calculate projection matrix I - rr^T - I_min_cov = eye - (viewdirs[..., None] * viewdirs[..., None, :]) - # Compute sum of projections - sum_proj = I_min_cov.matmul(origins[..., None]).sum(dim=-3) - # Solve for the intersection point using least squares - lookat = torch.linalg.lstsq(I_min_cov.sum(dim=-3), sum_proj).solution[..., 0] - # Check NaNs. - assert not torch.any(torch.isnan(lookat)) - return lookat - - -def get_lookat_w2cs(positions: torch.Tensor, lookat: torch.Tensor, up: torch.Tensor): - """ - Args: - positions: (N, 3) tensor of camera positions - lookat: (3,) tensor of lookat point - up: (3,) tensor of up vector - - Returns: - w2cs: (N, 3, 3) tensor of world to camera rotation matrices - """ - forward_vectors = F.normalize(lookat - positions, dim=-1) - right_vectors = F.normalize(torch.cross(forward_vectors, up[None], dim=-1), dim=-1) - down_vectors = F.normalize( - torch.cross(forward_vectors, right_vectors, dim=-1), dim=-1 - ) - Rs = torch.stack([right_vectors, down_vectors, forward_vectors], dim=-1) - w2cs = torch.linalg.inv(rt_to_mat4(Rs, positions)) - return w2cs - - -def get_arc_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_position = torch.linalg.inv(ref_w2c)[:3, 3] - thetas = ( - torch.sin( - torch.linspace(0.0, torch.pi * 2.0, num_frames + 1, device=ref_w2c.device)[ - :-1 - ] - ) - * (degree / 2.0) - / 180.0 - * torch.pi - ) - positions = torch.einsum( - "nij,j->ni", - roma.rotvec_to_rotmat(thetas[:, None] * up[None]), - ref_position - lookat, - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_lemniscate_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - degree: float, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - a = torch.linalg.norm(ref_c2w[:3, 3] - lookat) * np.tan(degree / 360 * np.pi) - # Lemniscate curve in camera space. Starting at the origin. - thetas = ( - torch.linspace(0, 2 * torch.pi, num_frames + 1, device=ref_w2c.device)[:-1] - + torch.pi / 2 - ) - positions = torch.stack( - [ - a * torch.cos(thetas) / (1 + torch.sin(thetas) ** 2), - a * torch.cos(thetas) * torch.sin(thetas) / (1 + torch.sin(thetas) ** 2), - torch.zeros(num_frames, device=ref_w2c.device), - ], - dim=-1, - ) - # Transform to world space. 
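    # NOTE (added commentary, not part of the original snapshot): padding the
    # positions with a trailing 1 makes them homogeneous, so a single einsum
    # with the 3x4 upper block of ref_c2w applies rotation and translation in
    # one step.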
- positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_spiral_w2cs( - ref_w2c: torch.Tensor, - lookat: torch.Tensor, - up: torch.Tensor, - num_frames: int, - rads: float | torch.Tensor, - zrate: float, - rots: int, - **_, -) -> torch.Tensor: - ref_c2w = torch.linalg.inv(ref_w2c) - thetas = torch.linspace( - 0, 2 * torch.pi * rots, num_frames + 1, device=ref_w2c.device - )[:-1] - # Spiral curve in camera space. Starting at the origin. - if isinstance(rads, torch.Tensor): - rads = rads.reshape(-1, 3).to(ref_w2c.device) - positions = ( - torch.stack( - [ - torch.cos(thetas), - -torch.sin(thetas), - -torch.sin(thetas * zrate), - ], - dim=-1, - ) - * rads - ) - # Transform to world space. - positions = torch.einsum( - "ij,nj->ni", ref_c2w[:3], F.pad(positions, (0, 1), value=1.0) - ) - return get_lookat_w2cs(positions, lookat, up) - - -def get_wander_w2cs(ref_w2c, focal_length, num_frames, **_): - device = ref_w2c.device - c2w = np.linalg.inv(ref_w2c.detach().cpu().numpy()) - max_disp = 48.0 - - max_trans = max_disp / focal_length - output_poses = [] - - for i in range(num_frames): - x_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames)) - y_trans = 0.0 - z_trans = max_trans * np.cos(2.0 * np.pi * float(i) / float(num_frames)) / 2.0 - - i_pose = np.concatenate( - [ - np.concatenate( - [ - np.eye(3), - np.array([x_trans, y_trans, z_trans])[:, np.newaxis], - ], - axis=1, - ), - np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :], - ], - axis=0, - ) - - i_pose = np.linalg.inv(i_pose) - - ref_pose = np.concatenate( - [c2w[:3, :4], np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]], axis=0 - ) - - render_pose = np.dot(ref_pose, i_pose) - output_poses.append(render_pose) - output_poses = torch.from_numpy(np.array(output_poses, dtype=np.float32)).to(device) - w2cs = torch.linalg.inv(output_poses) - - return w2cs diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/transforms.py b/som_out/swing/code/2024-10-26-020013/flow3d/transforms.py deleted file mode 100644 index 9ff7cc260c586d6da054729c7cda2ce8c57cecd9..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/transforms.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import Literal - -import roma -import torch -import torch.nn.functional as F - - -def rt_to_mat4( - R: torch.Tensor, t: torch.Tensor, s: torch.Tensor | None = None -) -> torch.Tensor: - """ - Args: - R (torch.Tensor): (..., 3, 3). - t (torch.Tensor): (..., 3). - s (torch.Tensor): (...,). 
- - Returns: - torch.Tensor: (..., 4, 4) - """ - mat34 = torch.cat([R, t[..., None]], dim=-1) - if s is None: - bottom = ( - mat34.new_tensor([[0.0, 0.0, 0.0, 1.0]]) - .reshape((1,) * (mat34.dim() - 2) + (1, 4)) - .expand(mat34.shape[:-2] + (1, 4)) - ) - else: - bottom = F.pad(1.0 / s[..., None, None], (3, 0), value=0.0) - mat4 = torch.cat([mat34, bottom], dim=-2) - return mat4 - - -def rmat_to_cont_6d(matrix): - """ - :param matrix (*, 3, 3) - :returns 6d vector (*, 6) - """ - return torch.cat([matrix[..., 0], matrix[..., 1]], dim=-1) - - -def cont_6d_to_rmat(cont_6d): - """ - :param 6d vector (*, 6) - :returns matrix (*, 3, 3) - """ - x1 = cont_6d[..., 0:3] - y1 = cont_6d[..., 3:6] - - x = F.normalize(x1, dim=-1) - y = F.normalize(y1 - (y1 * x).sum(dim=-1, keepdim=True) * x, dim=-1) - z = torch.linalg.cross(x, y, dim=-1) - - return torch.stack([x, y, z], dim=-1) - - -def solve_procrustes( - src: torch.Tensor, - dst: torch.Tensor, - weights: torch.Tensor | None = None, - enforce_se3: bool = False, - rot_type: Literal["quat", "mat", "6d"] = "quat", -): - """ - Solve the Procrustes problem to align two point clouds, by solving the - following problem: - - min_{s, R, t} || s * (src @ R.T + t) - dst ||_2, s.t. R.T @ R = I and det(R) = 1. - - Args: - src (torch.Tensor): (N, 3). - dst (torch.Tensor): (N, 3). - weights (torch.Tensor | None): (N,), optional weights for alignment. - enforce_se3 (bool): Whether to enforce the transfm to be SE3. - - Returns: - sim3 (tuple[torch.Tensor, torch.Tensor, torch.Tensor]): - q (torch.Tensor): (4,), rotation component in quaternion of WXYZ - format. - t (torch.Tensor): (3,), translation component. - s (torch.Tensor): (), scale component. - error (torch.Tensor): (), average L2 distance after alignment. - """ - # Compute weights. - if weights is None: - weights = src.new_ones(src.shape[0]) - weights = weights[:, None] / weights.sum() - # Normalize point positions. - src_mean = (src * weights).sum(dim=0) - dst_mean = (dst * weights).sum(dim=0) - src_cent = src - src_mean - dst_cent = dst - dst_mean - # Normalize point scales. - if not enforce_se3: - src_scale = (src_cent**2 * weights).sum(dim=-1).mean().sqrt() - dst_scale = (dst_cent**2 * weights).sum(dim=-1).mean().sqrt() - else: - src_scale = dst_scale = src.new_tensor(1.0) - src_scaled = src_cent / src_scale - dst_scaled = dst_cent / dst_scale - # Compute the matrix for the singular value decomposition (SVD). - matrix = (weights * dst_scaled).T @ src_scaled - U, _, Vh = torch.linalg.svd(matrix) - # Special reflection case. - S = torch.eye(3, device=src.device) - if torch.det(U) * torch.det(Vh) < 0: - S[2, 2] = -1 - R = U @ S @ Vh - # Compute the transformation. - if rot_type == "quat": - rot = roma.rotmat_to_unitquat(R).roll(1, dims=-1) - elif rot_type == "6d": - rot = rmat_to_cont_6d(R) - else: - rot = R - s = dst_scale / src_scale - t = dst_mean / s - src_mean @ R.T - sim3 = rot, t, s - # Debug: error. 
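    # NOTE (added commentary, not part of the original snapshot): as a sanity
    # check, the recovered similarity transform is applied to the source
    # points and the weighted mean L2 distance to the targets is compared
    # against the pre-alignment error; a correct solve should never increase
    # it.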
- procrustes_dst = torch.einsum( - "ij,nj->ni", rt_to_mat4(R, t, s), F.pad(src, (0, 1), value=1.0) - ) - procrustes_dst = procrustes_dst[:, :3] / procrustes_dst[:, 3:] - error_before = (torch.linalg.norm(dst - src, dim=-1) * weights[:, 0]).sum() - error = (torch.linalg.norm(dst - procrustes_dst, dim=-1) * weights[:, 0]).sum() - # print(f"Procrustes error: {error_before} -> {error}") - # if error_before < error: - # print("Something is wrong.") - # __import__("ipdb").set_trace() - return sim3, (error.item(), error_before.item()) diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/validator.py b/som_out/swing/code/2024-10-26-020013/flow3d/validator.py deleted file mode 100644 index 2dde198e86fac558e886b4fcac910dad3615430c..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/validator.py +++ /dev/null @@ -1,421 +0,0 @@ -import functools -import os -import os.path as osp -import time -from dataclasses import asdict -from typing import cast - -import imageio as iio -import numpy as np -import torch -import torch.nn.functional as F -from loguru import logger as guru -from nerfview import CameraState, Viewer -from pytorch_msssim import SSIM -from torch.utils.data import DataLoader, Dataset -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from flow3d.configs import LossesConfig, OptimizerConfig, SceneLRConfig -from flow3d.data.utils import normalize_coords, to_device -from flow3d.metrics import PCK, mLPIPS, mPSNR, mSSIM -from flow3d.scene_model import SceneModel -from flow3d.vis.utils import ( - apply_depth_colormap, - make_video_divisble, - plot_correspondences, -) - - -class Validator: - def __init__( - self, - model: SceneModel, - device: torch.device, - train_loader: DataLoader | None, - val_img_loader: DataLoader | None, - val_kpt_loader: DataLoader | None, - save_dir: str, - ): - self.model = model - self.device = device - self.train_loader = train_loader - self.val_img_loader = val_img_loader - self.val_kpt_loader = val_kpt_loader - self.save_dir = save_dir - self.has_bg = self.model.has_bg - - # metrics - self.ssim = SSIM(data_range=1.0, size_average=True, channel=3) - self.psnr_metric = mPSNR() - self.ssim_metric = mSSIM() - self.lpips_metric = mLPIPS().to(device) - self.fg_psnr_metric = mPSNR() - self.fg_ssim_metric = mSSIM() - self.fg_lpips_metric = mLPIPS().to(device) - self.bg_psnr_metric = mPSNR() - self.bg_ssim_metric = mSSIM() - self.bg_lpips_metric = mLPIPS().to(device) - self.pck_metric = PCK() - - def reset_metrics(self): - self.psnr_metric.reset() - self.ssim_metric.reset() - self.lpips_metric.reset() - self.fg_psnr_metric.reset() - self.fg_ssim_metric.reset() - self.fg_lpips_metric.reset() - self.bg_psnr_metric.reset() - self.bg_ssim_metric.reset() - self.bg_lpips_metric.reset() - self.pck_metric.reset() - - @torch.no_grad() - def validate(self): - self.reset_metrics() - metric_imgs = self.validate_imgs() or {} - metric_kpts = self.validate_keypoints() or {} - return {**metric_imgs, **metric_kpts} - - @torch.no_grad() - def validate_imgs(self): - guru.info("rendering validation images...") - if self.val_img_loader is None: - return - - for batch in tqdm(self.val_img_loader, desc="render val images"): - batch = to_device(batch, self.device) - frame_name = batch["frame_names"][0] - t = batch["ts"][0] - # (1, 4, 4). - w2c = batch["w2cs"] - # (1, 3, 3). - K = batch["Ks"] - # (1, H, W, 3). - img = batch["imgs"] - # (1, H, W). 
- valid_mask = batch.get( - "valid_masks", torch.ones_like(batch["imgs"][..., 0]) - ) - # (1, H, W). - fg_mask = batch["masks"] - - # (H, W). - covisible_mask = batch.get( - "covisible_masks", - torch.ones_like(fg_mask)[None], - ) - W, H = img_wh = img[0].shape[-2::-1] - rendered = self.model.render(t, w2c, K, img_wh, return_depth=True) - - # Compute metrics. - valid_mask *= covisible_mask - fg_valid_mask = fg_mask * valid_mask - bg_valid_mask = (1 - fg_mask) * valid_mask - main_valid_mask = valid_mask if self.has_bg else fg_valid_mask - - self.psnr_metric.update(rendered["img"], img, main_valid_mask) - self.ssim_metric.update(rendered["img"], img, main_valid_mask) - self.lpips_metric.update(rendered["img"], img, main_valid_mask) - - if self.has_bg: - self.fg_psnr_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_ssim_metric.update(rendered["img"], img, fg_valid_mask) - self.fg_lpips_metric.update(rendered["img"], img, fg_valid_mask) - - self.bg_psnr_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_ssim_metric.update(rendered["img"], img, bg_valid_mask) - self.bg_lpips_metric.update(rendered["img"], img, bg_valid_mask) - - # Dump results. - results_dir = osp.join(self.save_dir, "results", "rgb") - os.makedirs(results_dir, exist_ok=True) - iio.imwrite( - osp.join(results_dir, f"{frame_name}.png"), - (rendered["img"][0].cpu().numpy() * 255).astype(np.uint8), - ) - - return { - "val/psnr": self.psnr_metric.compute(), - "val/ssim": self.ssim_metric.compute(), - "val/lpips": self.lpips_metric.compute(), - "val/fg_psnr": self.fg_psnr_metric.compute(), - "val/fg_ssim": self.fg_ssim_metric.compute(), - "val/fg_lpips": self.fg_lpips_metric.compute(), - "val/bg_psnr": self.bg_psnr_metric.compute(), - "val/bg_ssim": self.bg_ssim_metric.compute(), - "val/bg_lpips": self.bg_lpips_metric.compute(), - } - - @torch.no_grad() - def validate_keypoints(self): - if self.val_kpt_loader is None: - return - pred_keypoints_3d_all = [] - time_ids = self.val_kpt_loader.dataset.time_ids.tolist() - h, w = self.val_kpt_loader.dataset.dataset.imgs.shape[1:3] - pred_train_depths = np.zeros((len(time_ids), h, w)) - - for batch in tqdm(self.val_kpt_loader, desc="render val keypoints"): - batch = to_device(batch, self.device) - # (2,). - ts = batch["ts"][0] - # (2, 4, 4). - w2cs = batch["w2cs"][0] - # (2, 3, 3). - Ks = batch["Ks"][0] - # (2, H, W, 3). - imgs = batch["imgs"][0] - # (2, P, 3). - keypoints = batch["keypoints"][0] - # (P,) - keypoint_masks = (keypoints[..., -1] > 0.5).all(dim=0) - src_keypoints, target_keypoints = keypoints[:, keypoint_masks, :2] - W, H = img_wh = imgs.shape[-2:0:-1] - rendered = self.model.render( - ts[0].item(), - w2cs[:1], - Ks[:1], - img_wh, - target_ts=ts[1:], - target_w2cs=w2cs[1:], - return_depth=True, - ) - pred_tracks_3d = rendered["tracks_3d"][0, ..., 0, :] - pred_tracks_2d = torch.einsum("ij,hwj->hwi", Ks[1], pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., -1:], min=1e-6 - ) - pred_keypoints = F.grid_sample( - pred_tracks_2d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - - # Compute metrics. 
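-            # PCK counts a predicted keypoint as correct if it lands within a
-            # threshold of the target; here the threshold is 5% of the longer
-            # image side.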
- self.pck_metric.update(pred_keypoints, target_keypoints, max(img_wh) * 0.05) - - padded_keypoints_3d = torch.zeros_like(keypoints[0]) - pred_keypoints_3d = F.grid_sample( - pred_tracks_3d[None].permute(0, 3, 1, 2), - normalize_coords(src_keypoints, H, W)[None, None], - align_corners=True, - ).permute(0, 2, 3, 1)[0, 0] - # Transform 3D keypoints back to world space. - pred_keypoints_3d = torch.einsum( - "ij,pj->pi", - torch.linalg.inv(w2cs[1])[:3], - F.pad(pred_keypoints_3d, (0, 1), value=1.0), - ) - padded_keypoints_3d[keypoint_masks] = pred_keypoints_3d - # Cache predicted keypoints. - pred_keypoints_3d_all.append(padded_keypoints_3d.cpu().numpy()) - pred_train_depths[time_ids.index(ts[0].item())] = ( - rendered["depth"][0, ..., 0].cpu().numpy() - ) - - # Dump unified results. - all_Ks = self.val_kpt_loader.dataset.dataset.Ks - all_w2cs = self.val_kpt_loader.dataset.dataset.w2cs - - keypoint_result_dict = { - "Ks": all_Ks[time_ids].cpu().numpy(), - "w2cs": all_w2cs[time_ids].cpu().numpy(), - "pred_keypoints_3d": np.stack(pred_keypoints_3d_all, 0), - "pred_train_depths": pred_train_depths, - } - - results_dir = osp.join(self.save_dir, "results") - os.makedirs(results_dir, exist_ok=True) - np.savez( - osp.join(results_dir, "keypoints.npz"), - **keypoint_result_dict, - ) - guru.info( - f"Dumped keypoint results to {results_dir=} {keypoint_result_dict['pred_keypoints_3d'].shape=}" - ) - - return {"val/pck": self.pck_metric.compute()} - - @torch.no_grad() - def save_train_videos(self, epoch: int): - if self.train_loader is None: - return - video_dir = osp.join(self.save_dir, "videos", f"epoch_{epoch:04d}") - os.makedirs(video_dir, exist_ok=True) - fps = getattr(self.train_loader.dataset.dataset, "fps", 15.0) - # Render video. - video = [] - ref_pred_depths = [] - masks = [] - depth_min, depth_max = 1e6, 0 - for batch_idx, batch in enumerate( - tqdm(self.train_loader, desc="Rendering video", leave=False) - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). - t = batch["ts"][0] - # (4, 4). - w2c = batch["w2cs"][0] - # (3, 3). - K = batch["Ks"][0] - # (H, W, 3). - img = batch["imgs"][0] - # (H, W). - depth = batch["depths"][0] - - img_wh = img.shape[-2::-1] - rendered = self.model.render( - t, w2c[None], K[None], img_wh, return_depth=True, return_mask=True - ) - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. 
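-            # Each entry is a (H, 2 * W, 3) side-by-side pair: ground truth on
-            # the left, render on the right.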
- video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu()) - ref_pred_depth = torch.cat( - (depth[..., None], rendered["depth"][0]), dim=1 - ).cpu() - ref_pred_depths.append(ref_pred_depth) - depth_min = min(depth_min, ref_pred_depth.min().item()) - depth_max = max(depth_max, ref_pred_depth.quantile(0.99).item()) - if rendered["mask"] is not None: - masks.append(rendered["mask"][0].cpu().squeeze(-1)) - - # rgb video - video = torch.stack(video, dim=0) - iio.mimwrite( - osp.join(video_dir, "rgbs.mp4"), - make_video_divisble((video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - # depth video - depth_video = torch.stack( - [ - apply_depth_colormap( - ref_pred_depth, near_plane=depth_min, far_plane=depth_max - ) - for ref_pred_depth in ref_pred_depths - ], - dim=0, - ) - iio.mimwrite( - osp.join(video_dir, "depths.mp4"), - make_video_divisble((depth_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - if len(masks) > 0: - # mask video - mask_video = torch.stack(masks, dim=0) - iio.mimwrite( - osp.join(video_dir, "masks.mp4"), - make_video_divisble((mask_video.numpy() * 255).astype(np.uint8)), - fps=fps, - ) - - # Render 2D track video. - tracks_2d, target_imgs = [], [] - sample_interval = 10 - batch0 = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in self.train_loader.dataset[0].items() - } - # (). - t = batch0["ts"] - # (4, 4). - w2c = batch0["w2cs"] - # (3, 3). - K = batch0["Ks"] - # (H, W, 3). - img = batch0["imgs"] - # (H, W). - bool_mask = batch0["masks"] > 0.5 - img_wh = img.shape[-2::-1] - for batch in tqdm( - self.train_loader, desc="Rendering 2D track video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # Putting results onto CPU since it will consume unnecessarily - # large GPU memory for long sequence OW. - # (1, H, W, 3). - target_imgs.append(batch["imgs"].cpu()) - # (1,). - target_ts = batch["ts"] - # (1, 4, 4). - target_w2cs = batch["w2cs"] - # (1, 3, 3). - target_Ks = batch["Ks"] - rendered = self.model.render( - t, - w2c[None], - K[None], - img_wh, - target_ts=target_ts, - target_w2cs=target_w2cs, - ) - pred_tracks_3d = rendered["tracks_3d"][0][ - ::sample_interval, ::sample_interval - ][bool_mask[::sample_interval, ::sample_interval]].swapaxes(0, 1) - pred_tracks_2d = torch.einsum("bij,bpj->bpi", target_Ks, pred_tracks_3d) - pred_tracks_2d = pred_tracks_2d[..., :2] / torch.clamp( - pred_tracks_2d[..., 2:], min=1e-6 - ) - tracks_2d.append(pred_tracks_2d.cpu()) - tracks_2d = torch.cat(tracks_2d, dim=0) - target_imgs = torch.cat(target_imgs, dim=0) - track_2d_video = plot_correspondences( - target_imgs.numpy(), - tracks_2d.numpy(), - query_id=cast(int, t), - ) - iio.mimwrite( - osp.join(video_dir, "tracks_2d.mp4"), - make_video_divisble(np.stack(track_2d_video, 0)), - fps=fps, - ) - # Render motion coefficient video. - with torch.random.fork_rng(): - torch.random.manual_seed(0) - motion_coef_colors = torch.pca_lowrank( - self.model.fg.get_coefs()[None], - q=3, - )[0][0] - motion_coef_colors = (motion_coef_colors - motion_coef_colors.min(0)[0]) / ( - motion_coef_colors.max(0)[0] - motion_coef_colors.min(0)[0] - ) - motion_coef_colors = F.pad( - motion_coef_colors, (0, 0, 0, self.model.bg.num_gaussians), value=0.5 - ) - video = [] - for batch in tqdm( - self.train_loader, desc="Rendering motion coefficient video", leave=False - ): - batch = { - k: v.to(self.device) if isinstance(v, torch.Tensor) else v - for k, v in batch.items() - } - # (). 
-            t = batch["ts"][0]
-            # (4, 4).
-            w2c = batch["w2cs"][0]
-            # (3, 3).
-            K = batch["Ks"][0]
-            # (H, W, 3).
-            img = batch["imgs"][0]
-            img_wh = img.shape[-2::-1]
-            rendered = self.model.render(
-                t, w2c[None], K[None], img_wh, colors_override=motion_coef_colors
-            )
-            # Putting results onto CPU since it will otherwise consume
-            # unnecessarily large GPU memory for long sequences.
-            video.append(torch.cat([img, rendered["img"][0]], dim=1).cpu())
-        video = torch.stack(video, dim=0)
-        iio.mimwrite(
-            osp.join(video_dir, "motion_coefs.mp4"),
-            make_video_divisble((video.numpy() * 255).astype(np.uint8)),
-            fps=fps,
-        )
diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/vis/__init__.py b/som_out/swing/code/2024-10-26-020013/flow3d/vis/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/vis/__pycache__/__init__.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/vis/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 7212361a31ebf2bfb8af64e9bd8996b3a76aeb2b..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/vis/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc
deleted file mode 100644
index 3a93c45ff5ae1195bbe5bd9672786fad1809e135..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/vis/__pycache__/playback_panel.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/vis/__pycache__/render_panel.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/vis/__pycache__/render_panel.cpython-310.pyc
deleted file mode 100644
index 4ec68ba905c7d346ffcf1b79a7f4f9b5d6d32a9d..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/vis/__pycache__/render_panel.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/vis/__pycache__/utils.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/vis/__pycache__/utils.cpython-310.pyc
deleted file mode 100644
index a13de83df1946336fd531a28f63a855bddcd5609..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/vis/__pycache__/utils.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/vis/__pycache__/viewer.cpython-310.pyc b/som_out/swing/code/2024-10-26-020013/flow3d/vis/__pycache__/viewer.cpython-310.pyc
deleted file mode 100644
index c876505c80b0a96e975e2c5597207095ae563d70..0000000000000000000000000000000000000000
Binary files a/som_out/swing/code/2024-10-26-020013/flow3d/vis/__pycache__/viewer.cpython-310.pyc and /dev/null differ
diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/vis/playback_panel.py b/som_out/swing/code/2024-10-26-020013/flow3d/vis/playback_panel.py
deleted file mode 100644
index 20e6185b2dbf2b33aedf8da4ffc56ec35f2641f2..0000000000000000000000000000000000000000
--- a/som_out/swing/code/2024-10-26-020013/flow3d/vis/playback_panel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import threading
-import time
-
-import viser
-
-
-def add_gui_playback_group(
-    server: viser.ViserServer,
-    num_frames: int,
-    min_fps: float = 1.0,
-    max_fps: float = 60.0,
-    fps_step: float = 0.1,
-    initial_fps:
float = 10.0, -): - gui_timestep = server.gui.add_slider( - "Timestep", - min=0, - max=num_frames - 1, - step=1, - initial_value=0, - disabled=True, - ) - gui_next_frame = server.gui.add_button("Next Frame") - gui_prev_frame = server.gui.add_button("Prev Frame") - gui_playing_pause = server.gui.add_button("Pause") - gui_playing_pause.visible = False - gui_playing_resume = server.gui.add_button("Resume") - gui_framerate = server.gui.add_slider( - "FPS", min=min_fps, max=max_fps, step=fps_step, initial_value=initial_fps - ) - - # Frame step buttons. - @gui_next_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - - @gui_prev_frame.on_click - def _(_) -> None: - gui_timestep.value = (gui_timestep.value - 1) % num_frames - - # Disable frame controls when we're playing. - def _toggle_gui_playing(_): - gui_playing_pause.visible = not gui_playing_pause.visible - gui_playing_resume.visible = not gui_playing_resume.visible - gui_timestep.disabled = gui_playing_pause.visible - gui_next_frame.disabled = gui_playing_pause.visible - gui_prev_frame.disabled = gui_playing_pause.visible - - gui_playing_pause.on_click(_toggle_gui_playing) - gui_playing_resume.on_click(_toggle_gui_playing) - - # Create a thread to update the timestep indefinitely. - def _update_timestep(): - while True: - if gui_playing_pause.visible: - gui_timestep.value = (gui_timestep.value + 1) % num_frames - time.sleep(1 / gui_framerate.value) - - threading.Thread(target=_update_timestep, daemon=True).start() - - return ( - gui_timestep, - gui_next_frame, - gui_prev_frame, - gui_playing_pause, - gui_playing_resume, - gui_framerate, - ) diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/vis/render_panel.py b/som_out/swing/code/2024-10-26-020013/flow3d/vis/render_panel.py deleted file mode 100644 index 7f581a35cd71117d84a021bfb7bc05fe99eca3ae..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/vis/render_panel.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import annotations - -import colorsys -import dataclasses -import datetime -import json -import threading -import time -from pathlib import Path -from typing import Dict, List, Literal, Optional, Tuple - -import numpy as np -import scipy -import splines -import splines.quaternion -import viser -import viser.transforms as tf - -VISER_SCALE_RATIO = 10.0 - - -@dataclasses.dataclass -class Keyframe: - time: float - position: np.ndarray - wxyz: np.ndarray - override_fov_enabled: bool - override_fov_rad: float - aspect: float - override_transition_enabled: bool - override_transition_sec: Optional[float] - - @staticmethod - def from_camera(time: float, camera: viser.CameraHandle, aspect: float) -> Keyframe: - return Keyframe( - time, - camera.position, - camera.wxyz, - override_fov_enabled=False, - override_fov_rad=camera.fov, - aspect=aspect, - override_transition_enabled=False, - override_transition_sec=None, - ) - - -class CameraPath: - def __init__( - self, server: viser.ViserServer, duration_element: viser.GuiInputHandle[float] - ): - self._server = server - self._keyframes: Dict[int, Tuple[Keyframe, viser.CameraFrustumHandle]] = {} - self._keyframe_counter: int = 0 - self._spline_nodes: List[viser.SceneNodeHandle] = [] - self._camera_edit_panel: Optional[viser.Gui3dContainerHandle] = None - - self._orientation_spline: Optional[splines.quaternion.KochanekBartels] = None - self._position_spline: Optional[splines.KochanekBartels] = None - self._fov_spline: Optional[splines.KochanekBartels] = None - self._time_spline: Optional[splines.KochanekBartels] = None - - self._keyframes_visible: bool = True - - self._duration_element = duration_element - - # These parameters should be overridden externally. - self.loop: bool = False - self.framerate: float = 30.0 - self.tension: float = 0.5 # Tension / alpha term. - self.default_fov: float = 0.0 - self.default_transition_sec: float = 0.0 - self.show_spline: bool = True - - def set_keyframes_visible(self, visible: bool) -> None: - self._keyframes_visible = visible - for keyframe in self._keyframes.values(): - keyframe[1].visible = visible - - def add_camera( - self, keyframe: Keyframe, keyframe_index: Optional[int] = None - ) -> None: - """Add a new camera, or replace an old one if `keyframe_index` is passed in.""" - server = self._server - - # Add a keyframe if we aren't replacing an existing one. 
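-        # Reusing the index on replacement keeps the scene node path stable,
-        # so edits (e.g. FOV overrides) update the existing frustum in place.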
- if keyframe_index is None: - keyframe_index = self._keyframe_counter - self._keyframe_counter += 1 - - print( - f"{keyframe.wxyz=} {keyframe.position=} {keyframe_index=} {keyframe.aspect=}" - ) - frustum_handle = server.scene.add_camera_frustum( - f"/render_cameras/{keyframe_index}", - fov=( - keyframe.override_fov_rad - if keyframe.override_fov_enabled - else self.default_fov - ), - aspect=keyframe.aspect, - scale=0.1, - color=(200, 10, 30), - wxyz=keyframe.wxyz, - position=keyframe.position, - visible=self._keyframes_visible, - ) - self._server.scene.add_icosphere( - f"/render_cameras/{keyframe_index}/sphere", - radius=0.03, - color=(200, 10, 30), - ) - - @frustum_handle.on_click - def _(_) -> None: - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=keyframe.position, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_fov = server.gui.add_checkbox( - "Override FOV", initial_value=keyframe.override_fov_enabled - ) - override_fov_degrees = server.gui.add_slider( - "Override FOV (degrees)", - 5.0, - 175.0, - step=0.1, - initial_value=keyframe.override_fov_rad * 180.0 / np.pi, - disabled=not keyframe.override_fov_enabled, - ) - delete_button = server.gui.add_button( - "Delete", color="red", icon=viser.Icon.TRASH - ) - go_to_button = server.gui.add_button("Go to") - close_button = server.gui.add_button("Close") - - @override_fov.on_update - def _(_) -> None: - keyframe.override_fov_enabled = override_fov.value - override_fov_degrees.disabled = not override_fov.value - self.add_camera(keyframe, keyframe_index) - - @override_fov_degrees.on_update - def _(_) -> None: - keyframe.override_fov_rad = override_fov_degrees.value / 180.0 * np.pi - self.add_camera(keyframe, keyframe_index) - - @delete_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - with event.client.gui.add_modal("Confirm") as modal: - event.client.gui.add_markdown("Delete keyframe?") - confirm_button = event.client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = event.client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - - keyframe_id = None - for i, keyframe_tuple in self._keyframes.items(): - if keyframe_tuple[1] is frustum_handle: - keyframe_id = i - break - assert keyframe_id is not None - - self._keyframes.pop(keyframe_id) - frustum_handle.remove() - camera_edit_panel.remove() - self._camera_edit_panel = None - modal.close() - self.update_spline() - - @exit_button.on_click - def _(_) -> None: - modal.close() - - @go_to_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - client = event.client - T_world_current = tf.SE3.from_rotation_and_translation( - tf.SO3(client.camera.wxyz), client.camera.position - ) - T_world_target = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) @ tf.SE3.from_translation(np.array([0.0, 0.0, -0.5])) - - T_current_target = T_world_current.inverse() @ T_world_target - - for j in range(10): - T_world_set = T_world_current @ tf.SE3.exp( - T_current_target.log() * j / 9.0 - ) - - # Important bit: we atomically set both the orientation and the position - # of the camera. 
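-                # Without the atomic block the viewer could render an
-                # intermediate state where only one of the two has been
-                # applied, causing a visible jump mid-animation.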
- with client.atomic(): - client.camera.wxyz = T_world_set.rotation().wxyz - client.camera.position = T_world_set.translation() - time.sleep(1.0 / 30.0) - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - self._keyframes[keyframe_index] = (keyframe, frustum_handle) - - def update_aspect(self, aspect: float) -> None: - for keyframe_index, frame in self._keyframes.items(): - frame = dataclasses.replace(frame[0], aspect=aspect) - self.add_camera(frame, keyframe_index=keyframe_index) - - def get_aspect(self) -> float: - """Get W/H aspect ratio, which is shared across all keyframes.""" - assert len(self._keyframes) > 0 - return next(iter(self._keyframes.values()))[0].aspect - - def reset(self) -> None: - for frame in self._keyframes.values(): - print(f"removing {frame[1]}") - frame[1].remove() - self._keyframes.clear() - self.update_spline() - print("camera path reset") - - def spline_t_from_t_sec(self, time: np.ndarray) -> np.ndarray: - """From a time value in seconds, compute a t value for our geometric - spline interpolation. An increment of 1 for the latter will move the - camera forward by one keyframe. - - We use a PCHIP spline here to guarantee monotonicity. - """ - transition_times_cumsum = self.compute_transition_times_cumsum() - spline_indices = np.arange(transition_times_cumsum.shape[0]) - - if self.loop: - # In the case of a loop, we pad the spline to match the start/end - # slopes. - interpolator = scipy.interpolate.PchipInterpolator( - x=np.concatenate( - [ - [-(transition_times_cumsum[-1] - transition_times_cumsum[-2])], - transition_times_cumsum, - transition_times_cumsum[-1:] + transition_times_cumsum[1:2], - ], - axis=0, - ), - y=np.concatenate( - [[-1], spline_indices, [spline_indices[-1] + 1]], axis=0 - ), - ) - else: - interpolator = scipy.interpolate.PchipInterpolator( - x=transition_times_cumsum, y=spline_indices - ) - - # Clip to account for floating point error. 
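-        # Worked example (illustrative): with two 2-second transitions,
-        # transition_times_cumsum = [0, 2, 4] and spline_indices = [0, 1, 2],
-        # so t = 3.0 s maps to spline parameter 1.5, halfway between the
-        # second and third keyframes.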
- return np.clip(interpolator(time), 0, spline_indices[-1]) - - def interpolate_pose_and_fov_rad( - self, normalized_t: float - ) -> Optional[Tuple[tf.SE3, float, float]]: - if len(self._keyframes) < 2: - return None - - self._time_spline = splines.KochanekBartels( - [keyframe[0].time for keyframe in self._keyframes.values()], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - self._fov_spline = splines.KochanekBartels( - [ - ( - keyframe[0].override_fov_rad - if keyframe[0].override_fov_enabled - else self.default_fov - ) - for keyframe in self._keyframes.values() - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - assert self._orientation_spline is not None - assert self._position_spline is not None - assert self._fov_spline is not None - assert self._time_spline is not None - - max_t = self.compute_duration() - t = max_t * normalized_t - spline_t = float(self.spline_t_from_t_sec(np.array(t))) - - quat = self._orientation_spline.evaluate(spline_t) - assert isinstance(quat, splines.quaternion.UnitQuaternion) - return ( - tf.SE3.from_rotation_and_translation( - tf.SO3(np.array([quat.scalar, *quat.vector])), - self._position_spline.evaluate(spline_t), - ), - float(self._fov_spline.evaluate(spline_t)), - float(self._time_spline.evaluate(spline_t)), - ) - - def update_spline(self) -> None: - num_frames = int(self.compute_duration() * self.framerate) - keyframes = list(self._keyframes.values()) - - if num_frames <= 0 or not self.show_spline or len(keyframes) < 2: - for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - return - - transition_times_cumsum = self.compute_transition_times_cumsum() - - self._orientation_spline = splines.quaternion.KochanekBartels( - [ - splines.quaternion.UnitQuaternion.from_unit_xyzw( - np.roll(keyframe[0].wxyz, shift=-1) - ) - for keyframe in keyframes - ], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - self._position_spline = splines.KochanekBartels( - [keyframe[0].position for keyframe in keyframes], - tcb=(self.tension, 0.0, 0.0), - endconditions="closed" if self.loop else "natural", - ) - - # Update visualized spline. - points_array = self._position_spline.evaluate( - self.spline_t_from_t_sec( - np.linspace(0, transition_times_cumsum[-1], num_frames) - ) - ) - colors_array = np.array( - [ - colorsys.hls_to_rgb(h, 0.5, 1.0) - for h in np.linspace(0.0, 1.0, len(points_array)) - ] - ) - - # Clear prior spline nodes. 
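-        # The visualized spline is rebuilt from scratch on every update, so
-        # stale handles must be removed first or they would accumulate.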
- for node in self._spline_nodes: - node.remove() - self._spline_nodes.clear() - - self._spline_nodes.append( - self._server.scene.add_spline_catmull_rom( - "/render_camera_spline", - positions=points_array, - color=(220, 220, 220), - closed=self.loop, - line_width=1.0, - segments=points_array.shape[0] + 1, - ) - ) - self._spline_nodes.append( - self._server.scene.add_point_cloud( - "/render_camera_spline/points", - points=points_array, - colors=colors_array, - point_size=0.04, - ) - ) - - def make_transition_handle(i: int) -> None: - assert self._position_spline is not None - transition_pos = self._position_spline.evaluate( - float( - self.spline_t_from_t_sec( - (transition_times_cumsum[i] + transition_times_cumsum[i + 1]) - / 2.0, - ) - ) - ) - transition_sphere = self._server.scene.add_icosphere( - f"/render_camera_spline/transition_{i}", - radius=0.04, - color=(255, 0, 0), - position=transition_pos, - ) - self._spline_nodes.append(transition_sphere) - - @transition_sphere.on_click - def _(_) -> None: - server = self._server - - if self._camera_edit_panel is not None: - self._camera_edit_panel.remove() - self._camera_edit_panel = None - - keyframe_index = (i + 1) % len(self._keyframes) - keyframe = keyframes[keyframe_index][0] - - with server.scene.add_3d_gui_container( - "/camera_edit_panel", - position=transition_pos, - ) as camera_edit_panel: - self._camera_edit_panel = camera_edit_panel - override_transition_enabled = server.gui.add_checkbox( - "Override transition", - initial_value=keyframe.override_transition_enabled, - ) - override_transition_sec = server.gui.add_number( - "Override transition (sec)", - initial_value=( - keyframe.override_transition_sec - if keyframe.override_transition_sec is not None - else self.default_transition_sec - ), - min=0.001, - max=30.0, - step=0.001, - disabled=not override_transition_enabled.value, - ) - close_button = server.gui.add_button("Close") - - @override_transition_enabled.on_update - def _(_) -> None: - keyframe.override_transition_enabled = ( - override_transition_enabled.value - ) - override_transition_sec.disabled = ( - not override_transition_enabled.value - ) - self._duration_element.value = self.compute_duration() - - @override_transition_sec.on_update - def _(_) -> None: - keyframe.override_transition_sec = override_transition_sec.value - self._duration_element.value = self.compute_duration() - - @close_button.on_click - def _(_) -> None: - assert camera_edit_panel is not None - camera_edit_panel.remove() - self._camera_edit_panel = None - - (num_transitions_plus_1,) = transition_times_cumsum.shape - for i in range(num_transitions_plus_1 - 1): - make_transition_handle(i) - - # for i in range(transition_times.shape[0]) - - def compute_duration(self) -> float: - """Compute the total duration of the trajectory.""" - total = 0.0 - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0 and not self.loop: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - return total - - def compute_transition_times_cumsum(self) -> np.ndarray: - """Compute the total duration of the trajectory.""" - total = 0.0 - out = [0.0] - for i, (keyframe, frustum) in enumerate(self._keyframes.values()): - if i == 0: - continue - del frustum - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else 
self.default_transition_sec - ) - out.append(total) - - if self.loop: - keyframe = next(iter(self._keyframes.values()))[0] - total += ( - keyframe.override_transition_sec - if keyframe.override_transition_enabled - and keyframe.override_transition_sec is not None - else self.default_transition_sec - ) - out.append(total) - - return np.array(out) - - -@dataclasses.dataclass -class RenderTabState: - """Useful GUI handles exposed by the render tab.""" - - preview_render: bool - preview_fov: float - preview_aspect: float - preview_camera_type: Literal["Perspective", "Fisheye", "Equirectangular"] - - -def populate_render_tab( - server: viser.ViserServer, - datapath: Path, - gui_timestep_handle: viser.GuiInputHandle[int] | None, -) -> RenderTabState: - - render_tab_state = RenderTabState( - preview_render=False, - preview_fov=0.0, - preview_aspect=1.0, - preview_camera_type="Perspective", - ) - - fov_degrees = server.gui.add_slider( - "Default FOV", - initial_value=75.0, - min=0.1, - max=175.0, - step=0.01, - hint="Field-of-view for rendering, which can also be overridden on a per-keyframe basis.", - ) - - @fov_degrees.on_update - def _(_) -> None: - fov_radians = fov_degrees.value / 180.0 * np.pi - for client in server.get_clients().values(): - client.camera.fov = fov_radians - camera_path.default_fov = fov_radians - - # Updating the aspect ratio will also re-render the camera frustums. - # Could rethink this. - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - resolution = server.gui.add_vector2( - "Resolution", - initial_value=(1920, 1080), - min=(50, 50), - max=(10_000, 10_000), - step=1, - hint="Render output resolution in pixels.", - ) - - @resolution.on_update - def _(_) -> None: - camera_path.update_aspect(resolution.value[0] / resolution.value[1]) - compute_and_update_preview_camera_state() - - camera_type = server.gui.add_dropdown( - "Camera type", - ("Perspective", "Fisheye", "Equirectangular"), - initial_value="Perspective", - hint="Camera model to render with. This is applied to all keyframes.", - ) - add_button = server.gui.add_button( - "Add Keyframe", - icon=viser.Icon.PLUS, - hint="Add a new keyframe at the current pose.", - ) - - @add_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - camera = server.get_clients()[event.client_id].camera - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(camera.wxyz), camera.position - ) - print(f"client {event.client_id} at {camera.position} {camera.wxyz}") - print(f"camera pose {pose.as_matrix()}") - if gui_timestep_handle is not None: - print(f"timestep {gui_timestep_handle.value}") - - # Add this camera to the path. 
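-        # The keyframe also records the current scene timestep, so a rendered
-        # path can sweep scene time along with camera pose.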
- time = 0 - if gui_timestep_handle is not None: - time = gui_timestep_handle.value - camera_path.add_camera( - Keyframe.from_camera( - time, - camera, - aspect=resolution.value[0] / resolution.value[1], - ), - ) - duration_number.value = camera_path.compute_duration() - camera_path.update_spline() - - clear_keyframes_button = server.gui.add_button( - "Clear Keyframes", - icon=viser.Icon.TRASH, - hint="Remove all keyframes from the render path.", - ) - - @clear_keyframes_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client_id is not None - client = server.get_clients()[event.client_id] - with client.atomic(), client.gui.add_modal("Confirm") as modal: - client.gui.add_markdown("Clear all keyframes?") - confirm_button = client.gui.add_button( - "Yes", color="red", icon=viser.Icon.TRASH - ) - exit_button = client.gui.add_button("Cancel") - - @confirm_button.on_click - def _(_) -> None: - camera_path.reset() - modal.close() - - duration_number.value = camera_path.compute_duration() - - # Clear move handles. - if len(transform_controls) > 0: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - @exit_button.on_click - def _(_) -> None: - modal.close() - - loop = server.gui.add_checkbox( - "Loop", False, hint="Add a segment between the first and last keyframes." - ) - - @loop.on_update - def _(_) -> None: - camera_path.loop = loop.value - duration_number.value = camera_path.compute_duration() - - tension_slider = server.gui.add_slider( - "Spline tension", - min=0.0, - max=1.0, - initial_value=0.0, - step=0.01, - hint="Tension parameter for adjusting smoothness of spline interpolation.", - ) - - @tension_slider.on_update - def _(_) -> None: - camera_path.tension = tension_slider.value - camera_path.update_spline() - - move_checkbox = server.gui.add_checkbox( - "Move keyframes", - initial_value=False, - hint="Toggle move handles for keyframes in the scene.", - ) - - transform_controls: List[viser.SceneNodeHandle] = [] - - @move_checkbox.on_update - def _(event: viser.GuiEvent) -> None: - # Clear move handles when toggled off. - if move_checkbox.value is False: - for t in transform_controls: - t.remove() - transform_controls.clear() - return - - def _make_transform_controls_callback( - keyframe: Tuple[Keyframe, viser.SceneNodeHandle], - controls: viser.TransformControlsHandle, - ) -> None: - @controls.on_update - def _(_) -> None: - keyframe[0].wxyz = controls.wxyz - keyframe[0].position = controls.position - - keyframe[1].wxyz = controls.wxyz - keyframe[1].position = controls.position - - camera_path.update_spline() - - # Show move handles. 
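-        # One transform-controls gizmo per keyframe; dragging it writes the
-        # new pose back into both the keyframe data and its frustum, then
-        # refits the spline (see the callback factory above).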
- assert event.client is not None - for keyframe_index, keyframe in camera_path._keyframes.items(): - controls = event.client.scene.add_transform_controls( - f"/keyframe_move/{keyframe_index}", - scale=0.4, - wxyz=keyframe[0].wxyz, - position=keyframe[0].position, - ) - transform_controls.append(controls) - _make_transform_controls_callback(keyframe, controls) - - show_keyframe_checkbox = server.gui.add_checkbox( - "Show keyframes", - initial_value=True, - hint="Show keyframes in the scene.", - ) - - @show_keyframe_checkbox.on_update - def _(_: viser.GuiEvent) -> None: - camera_path.set_keyframes_visible(show_keyframe_checkbox.value) - - show_spline_checkbox = server.gui.add_checkbox( - "Show spline", - initial_value=True, - hint="Show camera path spline in the scene.", - ) - - @show_spline_checkbox.on_update - def _(_) -> None: - camera_path.show_spline = show_spline_checkbox.value - camera_path.update_spline() - - playback_folder = server.gui.add_folder("Playback") - with playback_folder: - play_button = server.gui.add_button("Play", icon=viser.Icon.PLAYER_PLAY) - pause_button = server.gui.add_button( - "Pause", icon=viser.Icon.PLAYER_PAUSE, visible=False - ) - preview_render_button = server.gui.add_button( - "Preview Render", hint="Show a preview of the render in the viewport." - ) - preview_render_stop_button = server.gui.add_button( - "Exit Render Preview", color="red", visible=False - ) - - transition_sec_number = server.gui.add_number( - "Transition (sec)", - min=0.001, - max=30.0, - step=0.001, - initial_value=2.0, - hint="Time in seconds between each keyframe, which can also be overridden on a per-transition basis.", - ) - framerate_number = server.gui.add_number( - "FPS", min=0.1, max=240.0, step=1e-2, initial_value=30.0 - ) - framerate_buttons = server.gui.add_button_group("", ("24", "30", "60")) - duration_number = server.gui.add_number( - "Duration (sec)", - min=0.0, - max=1e8, - step=0.001, - initial_value=0.0, - disabled=True, - ) - - @framerate_buttons.on_click - def _(_) -> None: - framerate_number.value = float(framerate_buttons.value) - - @transition_sec_number.on_update - def _(_) -> None: - camera_path.default_transition_sec = transition_sec_number.value - duration_number.value = camera_path.compute_duration() - - def get_max_frame_index() -> int: - return max(1, int(framerate_number.value * duration_number.value) - 1) - - preview_camera_handle: Optional[viser.SceneNodeHandle] = None - - def remove_preview_camera() -> None: - nonlocal preview_camera_handle - if preview_camera_handle is not None: - preview_camera_handle.remove() - preview_camera_handle = None - - def compute_and_update_preview_camera_state() -> ( - Optional[Tuple[tf.SE3, float, float]] - ): - """Update the render tab state with the current preview camera pose. 
Returns current camera pose + FOV if available."""
-
-        if preview_frame_slider is None:
-            return
-        maybe_pose_and_fov_rad_and_time = camera_path.interpolate_pose_and_fov_rad(
-            preview_frame_slider.value / get_max_frame_index()
-        )
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-        render_tab_state.preview_fov = fov_rad
-        render_tab_state.preview_aspect = camera_path.get_aspect()
-        render_tab_state.preview_camera_type = camera_type.value
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-        return pose, fov_rad, time
-
-    def add_preview_frame_slider() -> Optional[viser.GuiInputHandle[int]]:
-        """Helper for creating the current frame # slider. This is removed and
-        re-added anytime the `max` value changes."""
-
-        with playback_folder:
-            preview_frame_slider = server.gui.add_slider(
-                "Preview frame",
-                min=0,
-                max=get_max_frame_index(),
-                step=1,
-                initial_value=0,
-                # Place right after the pause button.
-                order=preview_render_stop_button.order + 0.01,
-                disabled=get_max_frame_index() == 1,
-            )
-            play_button.disabled = preview_frame_slider.disabled
-            preview_render_button.disabled = preview_frame_slider.disabled
-
-        @preview_frame_slider.on_update
-        def _(_) -> None:
-            nonlocal preview_camera_handle
-            maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-            if maybe_pose_and_fov_rad_and_time is None:
-                return
-            pose, fov_rad, time = maybe_pose_and_fov_rad_and_time
-
-            preview_camera_handle = server.scene.add_camera_frustum(
-                "/preview_camera",
-                fov=fov_rad,
-                aspect=resolution.value[0] / resolution.value[1],
-                scale=0.35,
-                wxyz=pose.rotation().wxyz,
-                position=pose.translation(),
-                color=(10, 200, 30),
-            )
-            if render_tab_state.preview_render:
-                for client in server.get_clients().values():
-                    client.camera.wxyz = pose.rotation().wxyz
-                    client.camera.position = pose.translation()
-                if gui_timestep_handle is not None:
-                    gui_timestep_handle.value = int(time)
-
-        return preview_frame_slider
-
-    # We back up the camera poses before and after we start previewing renders.
-    camera_pose_backup_from_id: Dict[int, tuple] = {}
-
-    @preview_render_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = True
-        preview_render_button.visible = False
-        preview_render_stop_button.visible = True
-
-        maybe_pose_and_fov_rad_and_time = compute_and_update_preview_camera_state()
-        if maybe_pose_and_fov_rad_and_time is None:
-            remove_preview_camera()
-            return
-        pose, fov, time = maybe_pose_and_fov_rad_and_time
-        del fov
-
-        # Hide all scene nodes when we're previewing the render.
-        server.scene.set_global_visibility(False)
-
-        # Back up and then set camera poses.
-        for client in server.get_clients().values():
-            camera_pose_backup_from_id[client.client_id] = (
-                client.camera.position,
-                client.camera.look_at,
-                client.camera.up_direction,
-            )
-            client.camera.wxyz = pose.rotation().wxyz
-            client.camera.position = pose.translation()
-        if gui_timestep_handle is not None:
-            gui_timestep_handle.value = int(time)
-
-    @preview_render_stop_button.on_click
-    def _(_) -> None:
-        render_tab_state.preview_render = False
-        preview_render_button.visible = True
-        preview_render_stop_button.visible = False
-
-        # Revert camera poses.
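-        # Restore each client's backed-up position / look-at / up so the user
-        # returns to exactly the viewpoint they had before the preview.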
- for client in server.get_clients().values(): - if client.client_id not in camera_pose_backup_from_id: - continue - cam_position, cam_look_at, cam_up = camera_pose_backup_from_id.pop( - client.client_id - ) - client.camera.position = cam_position - client.camera.look_at = cam_look_at - client.camera.up_direction = cam_up - client.flush() - - # Un-hide scene nodes. - server.scene.set_global_visibility(True) - - preview_frame_slider = add_preview_frame_slider() - - # Update the # of frames. - @duration_number.on_update - @framerate_number.on_update - def _(_) -> None: - remove_preview_camera() # Will be re-added when slider is updated. - - nonlocal preview_frame_slider - old = preview_frame_slider - assert old is not None - - preview_frame_slider = add_preview_frame_slider() - if preview_frame_slider is not None: - old.remove() - else: - preview_frame_slider = old - - camera_path.framerate = framerate_number.value - camera_path.update_spline() - - # Play the camera trajectory when the play button is pressed. - @play_button.on_click - def _(_) -> None: - play_button.visible = False - pause_button.visible = True - - def play() -> None: - while not play_button.visible: - max_frame = int(framerate_number.value * duration_number.value) - if max_frame > 0: - assert preview_frame_slider is not None - preview_frame_slider.value = ( - preview_frame_slider.value + 1 - ) % max_frame - time.sleep(1.0 / framerate_number.value) - - threading.Thread(target=play).start() - - # Play the camera trajectory when the play button is pressed. - @pause_button.on_click - def _(_) -> None: - play_button.visible = True - pause_button.visible = False - - # add button for loading existing path - load_camera_path_button = server.gui.add_button( - "Load Path", icon=viser.Icon.FOLDER_OPEN, hint="Load an existing camera path." - ) - - @load_camera_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - camera_path_dir = datapath.parent - camera_path_dir.mkdir(parents=True, exist_ok=True) - preexisting_camera_paths = list(camera_path_dir.glob("*.json")) - preexisting_camera_filenames = [p.name for p in preexisting_camera_paths] - - with event.client.gui.add_modal("Load Path") as modal: - if len(preexisting_camera_filenames) == 0: - event.client.gui.add_markdown("No existing paths found") - else: - event.client.gui.add_markdown("Select existing camera path:") - camera_path_dropdown = event.client.gui.add_dropdown( - label="Camera Path", - options=[str(p) for p in preexisting_camera_filenames], - initial_value=str(preexisting_camera_filenames[0]), - ) - load_button = event.client.gui.add_button("Load") - - @load_button.on_click - def _(_) -> None: - # load the json file - json_path = datapath / camera_path_dropdown.value - with open(json_path, "r") as f: - json_data = json.load(f) - - keyframes = json_data["keyframes"] - camera_path.reset() - for i in range(len(keyframes)): - frame = keyframes[i] - pose = tf.SE3.from_matrix( - np.array(frame["matrix"]).reshape(4, 4) - ) - # apply the x rotation by 180 deg - pose = tf.SE3.from_rotation_and_translation( - pose.rotation() @ tf.SO3.from_x_radians(np.pi), - pose.translation(), - ) - - camera_path.add_camera( - Keyframe( - frame["time"], - position=pose.translation(), - wxyz=pose.rotation().wxyz, - # There are some floating point conversions between degrees and radians, so the fov and - # default_Fov values will not be exactly matched. 
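-                            # Hence the small tolerance (in degrees) below when
-                            # deciding whether a loaded keyframe really
-                            # overrides the default FOV.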
- override_fov_enabled=abs( - frame["fov"] - json_data.get("default_fov", 0.0) - ) - > 1e-3, - override_fov_rad=frame["fov"] / 180.0 * np.pi, - aspect=frame["aspect"], - override_transition_enabled=frame.get( - "override_transition_enabled", None - ), - override_transition_sec=frame.get( - "override_transition_sec", None - ), - ) - ) - - transition_sec_number.value = json_data.get( - "default_transition_sec", 0.5 - ) - - # update the render name - camera_path_name.value = json_path.stem - camera_path.update_spline() - modal.close() - - cancel_button = event.client.gui.add_button("Cancel") - - @cancel_button.on_click - def _(_) -> None: - modal.close() - - # set the initial value to the current date-time string - now = datetime.datetime.now() - camera_path_name = server.gui.add_text( - "Camera path name", - initial_value=now.strftime("%Y-%m-%d %H:%M:%S"), - hint="Name of the render", - ) - - save_path_button = server.gui.add_button( - "Save Camera Path", - color="green", - icon=viser.Icon.FILE_EXPORT, - hint="Save the camera path to json.", - ) - - reset_up_button = server.gui.add_button( - "Reset Up Direction", - icon=viser.Icon.ARROW_BIG_UP_LINES, - color="gray", - hint="Set the up direction of the camera orbit controls to the camera's current up direction.", - ) - - @reset_up_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - event.client.camera.up_direction = tf.SO3(event.client.camera.wxyz) @ np.array( - [0.0, -1.0, 0.0] - ) - - @save_path_button.on_click - def _(event: viser.GuiEvent) -> None: - assert event.client is not None - num_frames = int(framerate_number.value * duration_number.value) - json_data = {} - # json data has the properties: - # keyframes: list of keyframes with - # matrix : flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # camera_type: string of camera type - # render_height: int - # render_width: int - # fps: int - # seconds: float - # is_cycle: bool - # smoothness_value: float - # camera_path: list of frames with properties - # camera_to_world: flattened 4x4 matrix - # fov: float in degrees - # aspect: float - # first populate the keyframes: - keyframes = [] - for keyframe, dummy in camera_path._keyframes.values(): - pose = tf.SE3.from_rotation_and_translation( - tf.SO3(keyframe.wxyz), keyframe.position - ) - keyframes.append( - { - "matrix": pose.as_matrix().flatten().tolist(), - "fov": ( - np.rad2deg(keyframe.override_fov_rad) - if keyframe.override_fov_enabled - else fov_degrees.value - ), - "aspect": keyframe.aspect, - "override_transition_enabled": keyframe.override_transition_enabled, - "override_transition_sec": keyframe.override_transition_sec, - } - ) - json_data["default_fov"] = fov_degrees.value - json_data["default_transition_sec"] = transition_sec_number.value - json_data["keyframes"] = keyframes - json_data["camera_type"] = camera_type.value.lower() - json_data["render_height"] = resolution.value[1] - json_data["render_width"] = resolution.value[0] - json_data["fps"] = framerate_number.value - json_data["seconds"] = duration_number.value - json_data["is_cycle"] = loop.value - json_data["smoothness_value"] = tension_slider.value - - def get_intrinsics(W, H, fov): - focal = 0.5 * H / np.tan(0.5 * fov) - return np.array( - [[focal, 0.0, 0.5 * W], [0.0, focal, 0.5 * H], [0.0, 0.0, 1.0]] - ) - - # now populate the camera path: - camera_path_list = [] - for i in range(num_frames): - maybe_pose_and_fov_and_time = camera_path.interpolate_pose_and_fov_rad( - i / num_frames - ) - if 
maybe_pose_and_fov_and_time is None: - return - pose, fov, time = maybe_pose_and_fov_and_time - H = resolution.value[1] - W = resolution.value[0] - K = get_intrinsics(W, H, fov) - # rotate the axis of the camera 180 about x axis - w2c = pose.inverse().as_matrix() - camera_path_list.append( - { - "time": time, - "w2c": w2c.flatten().tolist(), - "K": K.flatten().tolist(), - "img_wh": (W, H), - } - ) - json_data["camera_path"] = camera_path_list - - # now write the json file - out_name = camera_path_name.value - json_outfile = datapath / f"{out_name}.json" - datapath.mkdir(parents=True, exist_ok=True) - print(f"writing to {json_outfile}") - with open(json_outfile.absolute(), "w") as outfile: - json.dump(json_data, outfile) - - camera_path = CameraPath(server, duration_number) - camera_path.default_fov = fov_degrees.value / 180.0 * np.pi - camera_path.default_transition_sec = transition_sec_number.value - - return render_tab_state - - -if __name__ == "__main__": - populate_render_tab( - server=viser.ViserServer(), - datapath=Path("."), - gui_timestep_handle=None, - ) - while True: - time.sleep(10.0) diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/vis/utils.py b/som_out/swing/code/2024-10-26-020013/flow3d/vis/utils.py deleted file mode 100644 index 4e5d6df8596ccf91b19e63d6b26c268336486fb3..0000000000000000000000000000000000000000 --- a/som_out/swing/code/2024-10-26-020013/flow3d/vis/utils.py +++ /dev/null @@ -1,544 +0,0 @@ -import colorsys -from typing import cast - -import cv2 -import numpy as np - -# import nvdiffrast.torch as dr -import torch -import torch.nn.functional as F -from matplotlib import colormaps -from viser import ViserServer - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class VisManager(metaclass=Singleton): - _servers = {} - - -def get_server(port: int | None = None) -> ViserServer: - manager = VisManager() - if port is None: - avail_ports = list(manager._servers.keys()) - port = avail_ports[0] if len(avail_ports) > 0 else 8890 - if port not in manager._servers: - manager._servers[port] = ViserServer(port=port, verbose=False) - return manager._servers[port] - - -def project_2d_tracks(tracks_3d_w, Ks, T_cw, return_depth=False): - """ - :param tracks_3d_w (torch.Tensor): (T, N, 3) - :param Ks (torch.Tensor): (T, 3, 3) - :param T_cw (torch.Tensor): (T, 4, 4) - :returns tracks_2d (torch.Tensor): (T, N, 2) - """ - tracks_3d_c = torch.einsum( - "tij,tnj->tni", T_cw, F.pad(tracks_3d_w, (0, 1), value=1) - )[..., :3] - tracks_3d_v = torch.einsum("tij,tnj->tni", Ks, tracks_3d_c) - if return_depth: - return ( - tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5), - tracks_3d_v[..., 2], - ) - return tracks_3d_v[..., :2] / torch.clamp(tracks_3d_v[..., 2:], min=1e-5) - - -def draw_keypoints_video( - imgs, kps, colors=None, occs=None, cmap: str = "gist_rainbow", radius: int = 3 -): - """ - :param imgs (np.ndarray): (T, H, W, 3) uint8 [0, 255] - :param kps (np.ndarray): (N, T, 2) - :param colors (np.ndarray): (N, 3) float [0, 1] - :param occ (np.ndarray): (N, T) bool - return out_frames (T, H, W, 3) - """ - if colors is None: - label = np.linspace(0, 1, kps.shape[0]) - colors = np.asarray(colormaps.get_cmap(cmap)(label))[..., :3] - out_frames = [] - for t in range(len(imgs)): - occ = occs[:, t] if occs is not None else None - vis = draw_keypoints_cv2(imgs[t], kps[:, t], colors, occ, 
radius=radius) - out_frames.append(vis) - return out_frames - - -def draw_keypoints_cv2(img, kps, colors=None, occs=None, radius=3): - """ - :param img (H, W, 3) - :param kps (N, 2) - :param occs (N) - :param colors (N, 3) from 0 to 1 - """ - out_img = img.copy() - kps = kps.round().astype("int").tolist() - if colors is not None: - colors = (255 * colors).astype("int").tolist() - for n in range(len(kps)): - kp = kps[n] - color = colors[n] if colors is not None else (255, 0, 0) - thickness = -1 if occs is None or occs[n] == 0 else 1 - out_img = cv2.circle(out_img, kp, radius, color, thickness, cv2.LINE_AA) - return out_img - - -def draw_tracks_2d( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 2, - track_line_width: int = 1, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - # (H, W, 3). - img_np = (img.cpu().numpy() * 255.0).astype(np.uint8) - # (P, N, 2). - tracks_2d_np = tracks_2d.cpu().numpy() - - num_tracks, num_frames = tracks_2d_np.shape[:2] - - canvas = img_np.copy() - for i in range(num_frames - 1): - alpha = max(1 - 0.9 * ((num_frames - 1 - i) / (num_frames * 0.99)), 0.1) - img_curr = canvas.copy() - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - color_alpha = 1 - hsv = colorsys.rgb_to_hsv(color[0], color[1], color[2]) - color = colorsys.hsv_to_rgb(hsv[0], hsv[1] * color_alpha, hsv[2]) - pt1 = tracks_2d_np[j, i] - pt2 = tracks_2d_np[j, i + 1] - p1 = (int(round(pt1[0])), int(round(pt1[1]))) - p2 = (int(round(pt2[0])), int(round(pt2[1]))) - img_curr = cv2.line( - img_curr, - p1, - p2, - color, - thickness=track_line_width, - lineType=cv2.LINE_AA, - ) - canvas = cv2.addWeighted(img_curr, alpha, canvas, 1 - alpha, 0) - - for j in range(num_tracks): - color = tuple(np.array(cmap(j / max(1, float(num_tracks - 1)))[:3]) * 255) - pt = tracks_2d_np[j, -1] - pt = (int(round(pt[0])), int(round(pt[1]))) - canvas = cv2.circle( - canvas, - pt, - track_point_size, - color, - thickness=-1, - lineType=cv2.LINE_AA, - ) - - return canvas - - -def generate_line_verts_faces(starts, ends, line_width): - """ - Args: - starts: (P, N, 2). - ends: (P, N, 2). - line_width: int. - - Returns: - verts: (P * N * 4, 2). - faces: (P * N * 2, 3). - """ - P, N, _ = starts.shape - - directions = F.normalize(ends - starts, dim=-1) - deltas = ( - torch.cat([-directions[..., 1:], directions[..., :1]], dim=-1) - * line_width - / 2.0 - ) - v0 = starts + deltas - v1 = starts - deltas - v2 = ends + deltas - v3 = ends - deltas - verts = torch.stack([v0, v1, v2, v3], dim=-2) - verts = verts.reshape(-1, 2) - - faces = [] - for p in range(P): - for n in range(N): - base_index = p * N * 4 + n * 4 - # Two triangles per rectangle: (0, 1, 2) and (2, 1, 3) - faces.append([base_index, base_index + 1, base_index + 2]) - faces.append([base_index + 2, base_index + 1, base_index + 3]) - faces = torch.as_tensor(faces, device=starts.device) - - return verts, faces - - -def generate_point_verts_faces(points, point_size, num_segments=10): - """ - Args: - points: (P, 2). - point_size: int. - num_segments: int. - - Returns: - verts: (P * (num_segments + 1), 2). - faces: (P * num_segments, 3). 
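-
-    Each point is triangulated as a fan: ``num_segments`` rim vertices on a
-    circle around the point plus one center vertex, giving one triangle per
-    segment.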
- """ - P, _ = points.shape - - angles = torch.linspace(0, 2 * torch.pi, num_segments + 1, device=points.device)[ - ..., :-1 - ] - unit_circle = torch.stack([torch.cos(angles), torch.sin(angles)], dim=1) - scaled_circles = (point_size / 2.0) * unit_circle - scaled_circles = scaled_circles[None].repeat(P, 1, 1) - verts = points[:, None] + scaled_circles - verts = torch.cat([verts, points[:, None]], dim=1) - verts = verts.reshape(-1, 2) - - faces = F.pad( - torch.as_tensor( - [[i, (i + 1) % num_segments] for i in range(num_segments)], - device=points.device, - ), - (0, 1), - value=num_segments, - ) - faces = faces[None, :] + torch.arange(P, device=points.device)[:, None, None] * ( - num_segments + 1 - ) - faces = faces.reshape(-1, 3) - - return verts, faces - - -def pixel_to_verts_clip(pixels, img_wh, z: float | torch.Tensor = 0.0, w=1.0): - verts_clip = pixels / pixels.new_tensor(img_wh) * 2.0 - 1.0 - w = torch.full_like(verts_clip[..., :1], w) - verts_clip = torch.cat([verts_clip, z * w, w], dim=-1) - return verts_clip - - -def draw_tracks_2d_th( - img: torch.Tensor, - tracks_2d: torch.Tensor, - track_point_size: int = 5, - track_point_segments: int = 16, - track_line_width: int = 2, - cmap_name: str = "gist_rainbow", -): - cmap = colormaps.get_cmap(cmap_name) - CTX = dr.RasterizeCudaContext() - - W, H = img.shape[1], img.shape[0] - if W % 8 != 0 or H % 8 != 0: - # Make sure img is divisible by 8. - img = F.pad( - img, - ( - 0, - 0, - 0, - 8 - W % 8 if W % 8 != 0 else 0, - 0, - 8 - H % 8 if H % 8 != 0 else 0, - ), - value=0.0, - ) - num_tracks, num_frames = tracks_2d.shape[:2] - - track_colors = torch.tensor( - [cmap(j / max(1, float(num_tracks - 1)))[:3] for j in range(num_tracks)], - device=img.device, - ).float() - - # Generate line verts. - verts_l, faces_l = generate_line_verts_faces( - tracks_2d[:, :-1], tracks_2d[:, 1:], track_line_width - ) - # Generate point verts. - verts_p, faces_p = generate_point_verts_faces( - tracks_2d[:, -1], track_point_size, track_point_segments - ) - - verts = torch.cat([verts_l, verts_p], dim=0) - faces = torch.cat([faces_l, faces_p + len(verts_l)], dim=0) - vert_colors = torch.cat( - [ - ( - track_colors[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 3) - ), - ( - track_colors[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 3) - ), - ], - dim=0, - ) - track_zs = torch.linspace(0.0, 1.0, num_tracks, device=img.device)[:, None] - vert_zs = torch.cat( - [ - ( - track_zs[:, None] - .repeat_interleave(4 * (num_frames - 1), dim=1) - .reshape(-1, 1) - ), - ( - track_zs[:, None] - .repeat_interleave(track_point_segments + 1, dim=1) - .reshape(-1, 1) - ), - ], - dim=0, - ) - track_alphas = torch.linspace( - max(0.1, 1.0 - (num_frames - 1) * 0.1), 1.0, num_frames, device=img.device - ) - vert_alphas = torch.cat( - [ - ( - track_alphas[None, :-1, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(4, dim=-2) - .reshape(-1, 1) - ), - ( - track_alphas[None, -1:, None] - .repeat_interleave(num_tracks, dim=0) - .repeat_interleave(track_point_segments + 1, dim=-2) - .reshape(-1, 1) - ), - ], - dim=0, - ) - - # Small trick to always render one track in front of the other. 
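-    # Each track gets a constant depth in [0, 1] (vert_zs above), so the
-    # rasterizer resolves overlaps by track index instead of z-fighting.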
-    # Small trick to always render one track in front of the other.
-    verts_clip = pixel_to_verts_clip(verts, (img.shape[1], img.shape[0]), vert_zs)
-    faces_int32 = faces.to(torch.int32)
-
-    rast, _ = cast(
-        tuple,
-        dr.rasterize(CTX, verts_clip[None], faces_int32, (img.shape[0], img.shape[1])),
-    )
-    rgba = cast(
-        torch.Tensor,
-        dr.interpolate(
-            torch.cat([vert_colors, vert_alphas], dim=-1).contiguous(),
-            rast,
-            faces_int32,
-        ),
-    )[0]
-    rgba = cast(torch.Tensor, dr.antialias(rgba, rast, verts_clip, faces_int32))[
-        0
-    ].clamp(0, 1)
-    # Compose.
-    color = rgba[..., :-1] * rgba[..., -1:] + (1.0 - rgba[..., -1:]) * img
-
-    # Unpad.
-    color = color[:H, :W]
-
-    return (color.cpu().numpy() * 255.0).astype(np.uint8)
-
-
-def make_video_divisble(
-    video: torch.Tensor | np.ndarray, block_size=16
-) -> torch.Tensor | np.ndarray:
-    H, W = video.shape[1:3]
-    H_new = H - H % block_size
-    W_new = W - W % block_size
-    return video[:, :H_new, :W_new]
-
-
-def apply_float_colormap(img: torch.Tensor, colormap: str = "turbo") -> torch.Tensor:
-    """Convert single channel to a color img.
-
-    Args:
-        img (torch.Tensor): (..., 1) float32 single channel image.
-        colormap (str): Colormap for img.
-
-    Returns:
-        (..., 3) colored img with colors in [0, 1].
-    """
-    img = torch.nan_to_num(img, 0)
-    if colormap == "gray":
-        return img.repeat(1, 1, 3)
-    img_long = (img * 255).long()
-    img_long_min = torch.min(img_long)
-    img_long_max = torch.max(img_long)
-    assert img_long_min >= 0, f"the min value is {img_long_min}"
-    assert img_long_max <= 255, f"the max value is {img_long_max}"
-    return torch.tensor(
-        colormaps[colormap].colors,  # type: ignore
-        device=img.device,
-    )[img_long[..., 0]]
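For intuition, apply_float_colormap above is a 256-entry table lookup: a normalized value in [0, 1] is scaled to an integer index into matplotlib's listed colormap. A minimal sketch (assuming matplotlib's "turbo" colormap, as in the deleted code):

    import torch
    from matplotlib import colormaps

    vals = torch.tensor([[0.0], [0.5], [1.0]])       # (..., 1), normalized values
    table = torch.tensor(colormaps["turbo"].colors)  # (256, 3) RGB lookup table
    rgb = table[(vals * 255).long()[..., 0]]         # (..., 3), colors in [0, 1]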
- """ - near_plane = near_plane or float(torch.min(depth)) - far_plane = far_plane or float(torch.max(depth)) - depth = (depth - near_plane) / (far_plane - near_plane + 1e-10) - depth = torch.clip(depth, 0.0, 1.0) - img = apply_float_colormap(depth, colormap="turbo") - if acc is not None: - img = img * acc + (1.0 - acc) - return img - - -def float2uint8(x): - return (255.0 * x).astype(np.uint8) - - -def uint82float(img): - return np.ascontiguousarray(img) / 255.0 - - -def drawMatches( - img1, - img2, - kp1, - kp2, - num_vis=200, - center=None, - idx_vis=None, - radius=2, - seed=1234, - mask=None, -): - num_pts = len(kp1) - if idx_vis is None: - if num_vis < num_pts: - rng = np.random.RandomState(seed) - idx_vis = rng.choice(num_pts, num_vis, replace=False) - else: - idx_vis = np.arange(num_pts) - - kp1_vis = kp1[idx_vis] - kp2_vis = kp2[idx_vis] - - h1, w1 = img1.shape[:2] - h2, w2 = img2.shape[:2] - - kp1_vis[:, 0] = np.clip(kp1_vis[:, 0], a_min=0, a_max=w1 - 1) - kp1_vis[:, 1] = np.clip(kp1_vis[:, 1], a_min=0, a_max=h1 - 1) - - kp2_vis[:, 0] = np.clip(kp2_vis[:, 0], a_min=0, a_max=w2 - 1) - kp2_vis[:, 1] = np.clip(kp2_vis[:, 1], a_min=0, a_max=h2 - 1) - - img1 = float2uint8(img1) - img2 = float2uint8(img2) - - if center is None: - center = np.median(kp1, axis=0) - - set_max = range(128) - colors = {m: i for i, m in enumerate(set_max)} - hsv = colormaps.get_cmap("hsv") - colors = { - m: (255 * np.array(hsv(i / float(len(colors))))[:3][::-1]).astype(np.int32) - for m, i in colors.items() - } - - if mask is not None: - ind = np.argsort(mask)[::-1] - kp1_vis = kp1_vis[ind] - kp2_vis = kp2_vis[ind] - mask = mask[ind] - - for i, (pt1, pt2) in enumerate(zip(kp1_vis, kp2_vis)): - # random_color = tuple(np.random.randint(low=0, high=255, size=(3,)).tolist()) - coord_angle = np.arctan2(pt1[1] - center[1], pt1[0] - center[0]) - corr_color = np.int32(64 * coord_angle / np.pi) % 128 - color = tuple(colors[corr_color].tolist()) - - if ( - (pt1[0] <= w1 - 1) - and (pt1[0] >= 0) - and (pt1[1] <= h1 - 1) - and (pt1[1] >= 0) - ): - img1 = cv2.circle( - img1, (int(pt1[0]), int(pt1[1])), radius, color, -1, cv2.LINE_AA - ) - if ( - (pt2[0] <= w2 - 1) - and (pt2[0] >= 0) - and (pt2[1] <= h2 - 1) - and (pt2[1] >= 0) - ): - if mask is not None and mask[i]: - continue - # img2 = cv2.drawMarker(img2, (int(pt2[0]), int(pt2[1])), color, markerType=cv2.MARKER_CROSS, - # markerSize=int(5*radius), thickness=int(radius/2), line_type=cv2.LINE_AA) - else: - img2 = cv2.circle( - img2, (int(pt2[0]), int(pt2[1])), radius, color, -1, cv2.LINE_AA - ) - - out = np.concatenate([img1, img2], axis=1) - return out - - -def plot_correspondences( - rgbs, kpts, query_id=0, masks=None, num_vis=1000000, radius=3, seed=1234 -): - num_rgbs = len(rgbs) - rng = np.random.RandomState(seed) - permutation = rng.permutation(kpts.shape[1]) - kpts = kpts[:, permutation, :][:, :num_vis] - if masks is not None: - masks = masks[:, permutation][:, :num_vis] - - rgbq = rgbs[query_id] # [h, w, 3] - kptsq = kpts[query_id] # [n, 2] - - frames = [] - for i in range(num_rgbs): - rgbi = rgbs[i] - kptsi = kpts[i] - if masks is not None: - maski = masks[i] - else: - maski = None - frame = drawMatches( - rgbq, - rgbi, - kptsq, - kptsi, - mask=maski, - num_vis=num_vis, - radius=radius, - seed=seed, - ) - frames.append(frame) - return frames diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/vis/viewer.py b/som_out/swing/code/2024-10-26-020013/flow3d/vis/viewer.py deleted file mode 100644 index 
diff --git a/som_out/swing/code/2024-10-26-020013/flow3d/vis/viewer.py b/som_out/swing/code/2024-10-26-020013/flow3d/vis/viewer.py
deleted file mode 100644
index 3f786110e1bd0dafcf71fc05c3048a4b8b2dc5cc..0000000000000000000000000000000000000000
--- a/som_out/swing/code/2024-10-26-020013/flow3d/vis/viewer.py
+++ /dev/null
@@ -1,69 +0,0 @@
-from pathlib import Path
-from typing import Callable, Literal, Optional, Tuple, Union
-
-import numpy as np
-from jaxtyping import Float32, UInt8
-from nerfview import CameraState, Viewer
-from viser import Icon, ViserServer
-
-from flow3d.vis.playback_panel import add_gui_playback_group
-from flow3d.vis.render_panel import populate_render_tab
-
-
-class DynamicViewer(Viewer):
-    def __init__(
-        self,
-        server: ViserServer,
-        render_fn: Callable[
-            [CameraState, Tuple[int, int]],
-            Union[
-                UInt8[np.ndarray, "H W 3"],
-                Tuple[UInt8[np.ndarray, "H W 3"], Optional[Float32[np.ndarray, "H W"]]],
-            ],
-        ],
-        num_frames: int,
-        work_dir: str,
-        mode: Literal["rendering", "training"] = "rendering",
-    ):
-        self.num_frames = num_frames
-        self.work_dir = Path(work_dir)
-        super().__init__(server, render_fn, mode)
-
-    def _define_guis(self):
-        super()._define_guis()
-        server = self.server
-        self._time_folder = server.gui.add_folder("Time")
-        with self._time_folder:
-            self._playback_guis = add_gui_playback_group(
-                server,
-                num_frames=self.num_frames,
-                initial_fps=15.0,
-            )
-            self._playback_guis[0].on_update(self.rerender)
-            self._canonical_checkbox = server.gui.add_checkbox("Canonical", False)
-            self._canonical_checkbox.on_update(self.rerender)
-
-            _cached_playback_disabled = []
-
-            def _toggle_gui_playing(event):
-                if event.target.value:
-                    nonlocal _cached_playback_disabled
-                    _cached_playback_disabled = [
-                        gui.disabled for gui in self._playback_guis
-                    ]
-                    target_disabled = [True] * len(self._playback_guis)
-                else:
-                    target_disabled = _cached_playback_disabled
-                for gui, disabled in zip(self._playback_guis, target_disabled):
-                    gui.disabled = disabled
-
-            self._canonical_checkbox.on_update(_toggle_gui_playing)
-
-        self._render_track_checkbox = server.gui.add_checkbox("Render tracks", False)
-        self._render_track_checkbox.on_update(self.rerender)
-
-        tabs = server.gui.add_tab_group()
-        with tabs.add_tab("Render", Icon.CAMERA):
-            self.render_tab_state = populate_render_tab(
-                server, Path(self.work_dir) / "camera_paths", self._playback_guis[0]
-            )
diff --git a/som_out/swing/code/2024-10-26-020013/scripts/batch_eval_ours_iphone_gcp.sh b/som_out/swing/code/2024-10-26-020013/scripts/batch_eval_ours_iphone_gcp.sh
deleted file mode 100644
index 94a7f379bb36b5e4b9c4be70d8905b3563df3c93..0000000000000000000000000000000000000000
--- a/som_out/swing/code/2024-10-26-020013/scripts/batch_eval_ours_iphone_gcp.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-EXPNAME=$1
-
-seq_names=("apple" "backpack" "block" "creeper" "handwavy" "haru-sit" "mochi-high-five" "paper-windmill" "pillow" "spin" "sriracha-tree" "teddy")
-out_dir="/mnt/out/$EXPNAME"
-for seq_name in "${seq_names[@]}"; do
-    seq_dir="$out_dir/$seq_name"
-    mkdir -p $seq_dir
-    gsutil -mq cp -r "gs://xcloud-shared/qianqianwang/flow3d/ours/iphone/$EXPNAME/${seq_name}/results" $seq_dir
-done
-
-python scripts/evaluate_iphone.py --data_dir /home/qianqianwang_google_com/datasets/iphone/dycheck --result_dir /mnt/out/$EXPNAME
\ No newline at end of file
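For reference, the deleted batch script takes the experiment name as its only argument, pulls each sequence's results from the gs:// bucket above, and then aggregates metrics with evaluate_iphone.py. A hypothetical invocation (experiment name made up):

    bash scripts/batch_eval_ours_iphone_gcp.sh my_experiment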
diff --git a/som_out/swing/code/2024-10-26-020013/scripts/evaluate_iphone.py b/som_out/swing/code/2024-10-26-020013/scripts/evaluate_iphone.py
deleted file mode 100644
index f7649f2c61629da5ad193f7484abdac243eb5dde..0000000000000000000000000000000000000000
--- a/som_out/swing/code/2024-10-26-020013/scripts/evaluate_iphone.py
+++ /dev/null
@@ -1,447 +0,0 @@
-import argparse
-import json
-import os.path as osp
-from glob import glob
-from itertools import product
-
-import cv2
-import imageio.v3 as iio
-import numpy as np
-import roma
-import torch
-from tqdm import tqdm
-
-from flow3d.data.colmap import get_colmap_camera_params
-from flow3d.metrics import mLPIPS, mPSNR, mSSIM
-from flow3d.transforms import rt_to_mat4, solve_procrustes
-
-parser = argparse.ArgumentParser()
-parser.add_argument(
-    "--data_dir",
-    type=str,
-    help="Path to the data directory that contains all the sequences.",
-)
-parser.add_argument(
-    "--result_dir",
-    type=str,
-    help="Path to the result directory that contains the results. "
-    "For batch evaluation, result_dir should contain one subdirectory per sequence "
-    "(result_dir/seq_name/results); for single-sequence evaluation, it should "
-    "contain the results directly (result_dir/results).",
-)
-parser.add_argument(
-    "--seq_names",
-    type=str,
-    nargs="+",
-    default=[
-        "apple",
-        "backpack",
-        "block",
-        "creeper",
-        "handwavy",
-        "haru-sit",
-        "mochi-high-five",
-        "paper-windmill",
-        "pillow",
-        "spin",
-        "sriracha-tree",
-        "teddy",
-    ],
-    help="Sequence names to evaluate.",
-)
-args = parser.parse_args()
-
-
-def load_data_dict(data_dir, train_names, val_names):
-    val_imgs = np.array(
-        [iio.imread(osp.join(data_dir, "rgb/1x", f"{name}.png")) for name in val_names]
-    )
-    val_covisibles = np.array(
-        [
-            iio.imread(
-                osp.join(
-                    data_dir, "flow3d_preprocessed/covisible/1x/val/", f"{name}.png"
-                )
-            )
-            for name in tqdm(val_names, desc="Loading val covisibles")
-        ]
-    )
-    train_depths = np.array(
-        [
-            np.load(osp.join(data_dir, "depth/1x", f"{name}.npy"))[..., 0]
-            for name in train_names
-        ]
-    )
-    train_Ks, train_w2cs = get_colmap_camera_params(
-        osp.join(data_dir, "flow3d_preprocessed/colmap/sparse/"),
-        [name + ".png" for name in train_names],
-    )
-    train_Ks = train_Ks[:, :3, :3]
-    scale = np.load(osp.join(data_dir, "flow3d_preprocessed/colmap/scale.npy")).item()
-    train_c2ws = np.linalg.inv(train_w2cs)
-    train_c2ws[:, :3, -1] *= scale
-    train_w2cs = np.linalg.inv(train_c2ws)
-    keypoint_paths = sorted(glob(osp.join(data_dir, "keypoint/2x/train/0_*.json")))
-    keypoints_2d = []
-    for keypoint_path in keypoint_paths:
-        with open(keypoint_path) as f:
-            keypoints_2d.append(json.load(f))
-    keypoints_2d = np.array(keypoints_2d)
-    keypoints_2d[..., :2] *= 2.0
-    time_ids = np.array(
-        [int(osp.basename(p).split("_")[1].split(".")[0]) for p in keypoint_paths]
-    )
-    time_pairs = np.array(list(product(time_ids, repeat=2)))
-    index_pairs = np.array(list(product(range(len(time_ids)), repeat=2)))
-    keypoints_3d = []
-    for i, kps_2d in zip(time_ids, keypoints_2d):
-        K = train_Ks[i]
-        w2c = train_w2cs[i]
-        depth = train_depths[i]
-        is_kp_visible = kps_2d[:, 2] == 1
-        is_depth_valid = (
-            cv2.remap(
-                (depth != 0).astype(np.float32),
-                kps_2d[None, :, :2].astype(np.float32),
-                None,  # type: ignore
-                cv2.INTER_LINEAR,
-                borderMode=cv2.BORDER_CONSTANT,
-            )[0]
-            == 1
-        )
-        kp_depths = cv2.remap(
-            depth,  # type: ignore
-            kps_2d[None, :, :2].astype(np.float32),
-            None,  # type: ignore
-            cv2.INTER_LINEAR,
-            borderMode=cv2.BORDER_CONSTANT,
-        )[0]
-        kps_3d = (
-            np.einsum(
-                "ij,pj->pi",
-                np.linalg.inv(K),
-                np.pad(kps_2d[:, :2], ((0, 0), (0, 1)), constant_values=1),
-            )
-            * kp_depths[:, None]
-        )
-        kps_3d = np.einsum(
-            "ij,pj->pi",
-            np.linalg.inv(w2c)[:3],
-            np.pad(kps_3d, ((0, 0), (0, 1)), constant_values=1),
-        )
-        kps_3d = np.concatenate(
-            [kps_3d, (is_kp_visible & is_depth_valid)[:, None]], axis=1
-        )
-        kps_3d[kps_3d[:, -1] != 1] = 0.0
-        keypoints_3d.append(kps_3d)
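The loop above lifts each annotated 2D keypoint to 3D by back-projecting through the intrinsics, scaling by the sampled depth, and mapping to world coordinates with the inverse extrinsics, i.e. x_world = w2c^{-1} (d * K^{-1} [u, v, 1]^T). A minimal sketch of the same unprojection with made-up camera parameters:

    import numpy as np

    K = np.array([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
    w2c = np.eye(4)  # hypothetical world-to-camera transform
    u, v, d = 400.0, 300.0, 2.0
    ray_cam = np.linalg.inv(K) @ np.array([u, v, 1.0])  # pixel -> camera-space ray
    p_cam = ray_cam * d                                 # scale by metric depth
    p_world = (np.linalg.inv(w2c) @ np.append(p_cam, 1.0))[:3]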
-    keypoints_3d = np.array(keypoints_3d)
-    return {
-        "val_imgs": val_imgs,
-        "val_covisibles": val_covisibles,
-        "train_depths": train_depths,
-        "train_Ks": train_Ks,
-        "train_w2cs": train_w2cs,
-        "keypoints_2d": keypoints_2d,
-        "keypoints_3d": keypoints_3d,
-        "time_ids": time_ids,
-        "time_pairs": time_pairs,
-        "index_pairs": index_pairs,
-    }
-
-
-def load_result_dict(result_dir, val_names):
-    try:
-        pred_val_imgs = np.array(
-            [
-                iio.imread(osp.join(result_dir, "rgb", f"{name}.png"))
-                for name in val_names
-            ]
-        )
-    except Exception:
-        pred_val_imgs = None
-    try:
-        keypoints_dict = np.load(
-            osp.join(result_dir, "keypoints.npz"), allow_pickle=True
-        )
-        if len(keypoints_dict) == 1 and "arr_0" in keypoints_dict:
-            keypoints_dict = keypoints_dict["arr_0"].item()
-        pred_keypoint_Ks = keypoints_dict["Ks"]
-        pred_keypoint_w2cs = keypoints_dict["w2cs"]
-        pred_keypoints_3d = keypoints_dict["pred_keypoints_3d"]
-        pred_train_depths = keypoints_dict["pred_train_depths"]
-    except Exception:
-        print(
-            "No keypoints.npz found; make sure this is because the method itself "
-            "cannot produce keypoints."
-        )
-        keypoints_dict = {}
-        pred_keypoint_Ks = None
-        pred_keypoint_w2cs = None
-        pred_keypoints_3d = None
-        pred_train_depths = None
-
-    if "visibilities" in list(keypoints_dict.keys()):
-        pred_visibilities = keypoints_dict["visibilities"]
-    else:
-        pred_visibilities = None
-
-    return {
-        "pred_val_imgs": pred_val_imgs,
-        "pred_train_depths": pred_train_depths,
-        "pred_keypoint_Ks": pred_keypoint_Ks,
-        "pred_keypoint_w2cs": pred_keypoint_w2cs,
-        "pred_keypoints_3d": pred_keypoints_3d,
-        "pred_visibilities": pred_visibilities,
-    }
-
-
-def evaluate_3d_tracking(data_dict, result_dict):
-    train_Ks = data_dict["train_Ks"]
-    train_w2cs = data_dict["train_w2cs"]
-    keypoints_3d = data_dict["keypoints_3d"]
-    time_ids = data_dict["time_ids"]
-    time_pairs = data_dict["time_pairs"]
-    index_pairs = data_dict["index_pairs"]
-    pred_keypoint_Ks = result_dict["pred_keypoint_Ks"]
-    pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"]
-    pred_keypoints_3d = result_dict["pred_keypoints_3d"]
-    if not np.allclose(train_Ks[time_ids], pred_keypoint_Ks):
-        print("Inconsistent camera intrinsics.")
-        print(train_Ks[time_ids][0], pred_keypoint_Ks[0])
-    keypoint_w2cs = train_w2cs[time_ids]
-    q, t, s = solve_procrustes(
-        torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to(
-            torch.float32
-        ),
-        torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32),
-    )[0]
-    R = roma.unitquat_to_rotmat(q.roll(-1, dims=-1))
-    pred_keypoints_3d = np.einsum(
-        "ij,...j->...i",
-        rt_to_mat4(R, t, s).numpy().astype(np.float64),
-        np.pad(pred_keypoints_3d, ((0, 0), (0, 0), (0, 1)), constant_values=1),
-    )
-    pred_keypoints_3d = pred_keypoints_3d[..., :3] / pred_keypoints_3d[..., 3:]
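The block that follows reports three numbers per sequence: EPE, the mean 3D end-point error over covisible keypoints, and PCK at 10 cm / 5 cm, the fraction of keypoints within those distances of the ground truth (the Procrustes alignment above first puts the predictions into the metric ground-truth frame). A toy illustration with made-up errors:

    import numpy as np

    gt = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
    pred = np.array([[0.03, 0.0, 0.0], [1.0, 0.07, 0.0]])
    errs = np.linalg.norm(gt - pred, axis=-1)  # per-keypoint error: [0.03, 0.07]
    epe = errs.mean()                          # 0.05
    pck_10cm = (errs < 0.1).mean()             # 1.0 (both within 10 cm)
    pck_5cm = (errs < 0.05).mean()             # 0.5 (only the first within 5 cm)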
-    # Compute 3D tracking metrics.
-    pair_keypoints_3d = keypoints_3d[index_pairs]
-    is_covisible = (pair_keypoints_3d[:, :, :, -1] == 1).all(axis=1)
-    target_keypoints_3d = pair_keypoints_3d[:, 1, :, :3]
-    epes = []
-    for i in range(len(time_pairs)):
-        epes.append(
-            np.linalg.norm(
-                target_keypoints_3d[i][is_covisible[i]]
-                - pred_keypoints_3d[i][is_covisible[i]],
-                axis=-1,
-            )
-        )
-    epe = np.mean(
-        [frame_epes.mean() for frame_epes in epes if len(frame_epes) > 0]
-    ).item()
-    pck_3d_10cm = np.mean(
-        [(frame_epes < 0.1).mean() for frame_epes in epes if len(frame_epes) > 0]
-    ).item()
-    pck_3d_5cm = np.mean(
-        [(frame_epes < 0.05).mean() for frame_epes in epes if len(frame_epes) > 0]
-    ).item()
-    print(f"3D tracking EPE: {epe:.4f}")
-    print(f"3D tracking PCK (10cm): {pck_3d_10cm:.4f}")
-    print(f"3D tracking PCK (5cm): {pck_3d_5cm:.4f}")
-    print("-----------------------------")
-    return epe, pck_3d_10cm, pck_3d_5cm
-
-
-def project(Ks, w2cs, pts):
-    """
-    Args:
-        Ks: (N, 3, 3) camera intrinsics.
-        w2cs: (N, 4, 4) camera extrinsics.
-        pts: (N, N, M, 3) 3D points.
-    """
-    N = Ks.shape[0]
-    pts = pts.swapaxes(0, 1).reshape(N, -1, 3)
-
-    pts_homogeneous = np.concatenate([pts, np.ones_like(pts[..., -1:])], axis=-1)
-
-    # Apply world-to-camera transformation
-    pts_homogeneous = np.matmul(w2cs[:, :3], pts_homogeneous.swapaxes(1, 2)).swapaxes(
-        1, 2
-    )
-    # Project to image plane using intrinsic parameters
-    projected_pts = np.matmul(Ks, pts_homogeneous.swapaxes(1, 2)).swapaxes(1, 2)
-
-    depths = projected_pts[..., 2:3]
-    # Normalize homogeneous coordinates
-    projected_pts = projected_pts[..., :2] / np.clip(depths, a_min=1e-6, a_max=None)
-    projected_pts = projected_pts.reshape(N, N, -1, 2).swapaxes(0, 1)
-    depths = depths.reshape(N, N, -1).swapaxes(0, 1)
-    return projected_pts, depths
-
-
-def evaluate_2d_tracking(data_dict, result_dict):
-    train_w2cs = data_dict["train_w2cs"]
-    keypoints_2d = data_dict["keypoints_2d"]
-    visibilities = keypoints_2d[..., -1].astype(np.bool_)
-    time_ids = data_dict["time_ids"]
-    num_frames = len(time_ids)
-    num_pts = keypoints_2d.shape[1]
-    pred_train_depths = result_dict["pred_train_depths"]
-    pred_keypoint_Ks = result_dict["pred_keypoint_Ks"]
-    pred_keypoint_w2cs = result_dict["pred_keypoint_w2cs"]
-    pred_keypoints_3d = result_dict["pred_keypoints_3d"].reshape(
-        num_frames, -1, num_pts, 3
-    )
-    keypoint_w2cs = train_w2cs[time_ids]
-    s = solve_procrustes(
-        torch.from_numpy(np.linalg.inv(pred_keypoint_w2cs)[:, :3, -1]).to(
-            torch.float32
-        ),
-        torch.from_numpy(np.linalg.inv(keypoint_w2cs)[:, :3, -1]).to(torch.float32),
-    )[0][-1].item()
-
-    target_points = keypoints_2d[None].repeat(num_frames, axis=0)[..., :2]
-    target_visibilities = visibilities[None].repeat(num_frames, axis=0)
-
-    pred_points, pred_depths = project(
-        pred_keypoint_Ks, pred_keypoint_w2cs, pred_keypoints_3d
-    )
-    if result_dict["pred_visibilities"] is not None:
-        pred_visibilities = result_dict["pred_visibilities"].reshape(
-            num_frames, -1, num_pts
-        )
-    else:
-        rendered_depths = []
-        for i, points in zip(
-            data_dict["index_pairs"][:, -1],
-            pred_points.reshape(-1, pred_points.shape[2], 2),
-        ):
-            rendered_depths.append(
-                cv2.remap(
-                    pred_train_depths[i].astype(np.float32),
-                    points[None].astype(np.float32),  # type: ignore
-                    None,  # type: ignore
-                    cv2.INTER_LINEAR,
-                    borderMode=cv2.BORDER_CONSTANT,
-                )[0]
-            )
-        rendered_depths = np.array(rendered_depths).reshape(num_frames, -1, num_pts)
-        pred_visibilities = (np.abs(rendered_depths - pred_depths) * s) < 0.05
-
-    one_hot_eye = np.eye(target_points.shape[0])[..., None].repeat(num_pts, axis=-1)
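The identity matrix built above excludes self-pairs (query frame equal to target frame) from evaluation; together with the visibility mask applied in the next lines, only cross-frame, visible keypoints count toward the 2D metrics. A tiny shape check (hypothetical sizes):

    import numpy as np

    num_frames, num_pts = 3, 2
    one_hot_eye = np.eye(num_frames)[..., None].repeat(num_pts, axis=-1)  # (3, 3, 2)
    evaluation_points = one_hot_eye == 0  # False exactly on the i == j diagonal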
-    evaluation_points = one_hot_eye == 0
-    for i in range(num_frames):
-        evaluation_points[i, :, ~visibilities[i]] = False
-    occ_acc = np.sum(
-        np.equal(pred_visibilities, target_visibilities) & evaluation_points
-    ) / np.sum(evaluation_points)
-    all_frac_within = []
-    all_jaccard = []
-
-    for thresh in [4, 8, 16, 32, 64]:
-        within_dist = np.sum(
-            np.square(pred_points - target_points),
-            axis=-1,
-        ) < np.square(thresh)
-        is_correct = np.logical_and(within_dist, target_visibilities)
-        count_correct = np.sum(is_correct & evaluation_points)
-        count_visible_points = np.sum(target_visibilities & evaluation_points)
-        frac_correct = count_correct / count_visible_points
-        all_frac_within.append(frac_correct)
-
-        true_positives = np.sum(is_correct & pred_visibilities & evaluation_points)
-        gt_positives = np.sum(target_visibilities & evaluation_points)
-        false_positives = (~target_visibilities) & pred_visibilities
-        false_positives = false_positives | ((~within_dist) & pred_visibilities)
-        false_positives = np.sum(false_positives & evaluation_points)
-        jaccard = true_positives / (gt_positives + false_positives)
-        all_jaccard.append(jaccard)
-    AJ = np.mean(all_jaccard)
-    APCK = np.mean(all_frac_within)
-
-    print(f"2D tracking AJ: {AJ:.4f}")
-    print(f"2D tracking avg PCK: {APCK:.4f}")
-    print(f"2D tracking occlusion accuracy: {occ_acc:.4f}")
-    print("-----------------------------")
-    return AJ, APCK, occ_acc
-
-
-def evaluate_nv(data_dict, result_dict):
-    device = "cuda"
-    psnr_metric = mPSNR().to(device)
-    ssim_metric = mSSIM().to(device)
-    lpips_metric = mLPIPS().to(device)
-
-    val_imgs = torch.from_numpy(data_dict["val_imgs"])[..., :3].to(device)
-    val_covisibles = torch.from_numpy(data_dict["val_covisibles"]).to(device)
-    pred_val_imgs = torch.from_numpy(result_dict["pred_val_imgs"]).to(device)
-
-    for i in range(len(val_imgs)):
-        val_img = val_imgs[i] / 255.0
-        pred_val_img = pred_val_imgs[i] / 255.0
-        val_covisible = val_covisibles[i] / 255.0
-        psnr_metric.update(val_img, pred_val_img, val_covisible)
-        ssim_metric.update(val_img[None], pred_val_img[None], val_covisible[None])
-        lpips_metric.update(val_img[None], pred_val_img[None], val_covisible[None])
-    mpsnr = psnr_metric.compute().item()
-    mssim = ssim_metric.compute().item()
-    mlpips = lpips_metric.compute().item()
-    print(f"NV mPSNR: {mpsnr:.4f}")
-    print(f"NV mSSIM: {mssim:.4f}")
-    print(f"NV mLPIPS: {mlpips:.4f}")
-    return mpsnr, mssim, mlpips
-
-
-if __name__ == "__main__":
-    seq_names = args.seq_names
-
-    epe_all, pck_3d_10cm_all, pck_3d_5cm_all = [], [], []
-    AJ_all, APCK_all, occ_acc_all = [], [], []
-    mpsnr_all, mssim_all, mlpips_all = [], [], []
-
-    for seq_name in seq_names:
-        print("=========================================")
-        print(f"Evaluating {seq_name}")
-        print("=========================================")
-        data_dir = osp.join(args.data_dir, seq_name)
-        if not osp.exists(data_dir):
-            data_dir = args.data_dir
-        if not osp.exists(data_dir):
-            raise ValueError(f"Data directory {data_dir} not found.")
-        result_dir = osp.join(args.result_dir, seq_name, "results/")
-        if not osp.exists(result_dir):
-            result_dir = osp.join(args.result_dir, "results/")
-        if not osp.exists(result_dir):
-            raise ValueError(f"Result directory {result_dir} not found.")
-
-        with open(osp.join(data_dir, "splits/train.json")) as f:
-            train_names = json.load(f)["frame_names"]
-        with open(osp.join(data_dir, "splits/val.json")) as f:
-            val_names = json.load(f)["frame_names"]
-
-        data_dict = load_data_dict(data_dir, train_names, val_names)
-        result_dict = load_result_dict(result_dir, val_names)
-        if result_dict["pred_keypoints_3d"] is not None:
-            epe, pck_3d_10cm, pck_3d_5cm = evaluate_3d_tracking(data_dict, result_dict)
-            AJ, APCK, occ_acc = evaluate_2d_tracking(data_dict, result_dict)
-            epe_all.append(epe)
-            pck_3d_10cm_all.append(pck_3d_10cm)
-            pck_3d_5cm_all.append(pck_3d_5cm)
-            AJ_all.append(AJ)
-            APCK_all.append(APCK)
-            occ_acc_all.append(occ_acc)
-        if len(data_dict["val_imgs"]) > 0:
-            if result_dict["pred_val_imgs"] is None:
-                print("No NV results found.")
-                continue
-            mpsnr, mssim, mlpips = evaluate_nv(data_dict, result_dict)
-            mpsnr_all.append(mpsnr)
-            mssim_all.append(mssim)
-            mlpips_all.append(mlpips)
-
-    print(f"mean 3D tracking EPE: {np.mean(epe_all):.4f}")
-    print(f"mean 3D tracking PCK (10cm): {np.mean(pck_3d_10cm_all):.4f}")
-    print(f"mean 3D tracking PCK (5cm): {np.mean(pck_3d_5cm_all):.4f}")
-    print(f"mean 2D tracking AJ: {np.mean(AJ_all):.4f}")
-    print(f"mean 2D tracking avg PCK: {np.mean(APCK_all):.4f}")
-    print(f"mean 2D tracking occlusion accuracy: {np.mean(occ_acc_all):.4f}")
-    print(f"mean NV mPSNR: {np.mean(mpsnr_all):.4f}")
-    print(f"mean NV mSSIM: {np.mean(mssim_all):.4f}")
-    print(f"mean NV mLPIPS: {np.mean(mlpips_all):.4f}")
diff --git a/som_out/swing/events.out.tfevents.1729879346.cvsv00-140.1480844.0 b/som_out/swing/events.out.tfevents.1729879346.cvsv00-140.1480844.0
deleted file mode 100644
index 54998e7b59292ec66e3dc358a9946f0af0adee33..0000000000000000000000000000000000000000
--- a/som_out/swing/events.out.tfevents.1729879346.cvsv00-140.1480844.0
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:eb6297add773fcea4a8e75f32046e35a3537222c454c6e900614c74ef6b9f436
-size 3865878
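Taken together, the deleted evaluation script would be invoked along these lines (paths are placeholders; --seq_names defaults to all twelve iPhone sequences):

    python scripts/evaluate_iphone.py \
        --data_dir /path/to/iphone/dycheck \
        --result_dir /path/to/results \
        --seq_names paper-windmill spin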