3v324v23 committed on
Commit
2cc2d03
·
1 Parent(s): 2215d37

--n_workers to eval script bcoz y not

Browse files
dronescapes_reader/multitask_dataset.py CHANGED
@@ -70,7 +70,7 @@ class MultiTaskDataset(Dataset):
70
  """
71
 
72
  def __init__(self, path: Path, task_names: list[str] | None = None, handle_missing_data: str = "fill_none",
73
- files_suffix: str = "npz", task_types: dict[str, type] = None):
74
  assert Path(path).exists(), f"Provided path '{path}' doesn't exist!"
75
  assert handle_missing_data in ("drop", "fill_none", "fill_zero", "fill_nan"), \
76
  f"Invalid handle_missing_data mode: {handle_missing_data}"
@@ -94,13 +94,19 @@ class MultiTaskDataset(Dataset):
94
  self._tasks: list[NpzRepresentation] | None = None
95
  self.name_to_task = {task.name: task for task in self.tasks}
96
  logger.info(f"Tasks used in this dataset: {self.task_names}")
97
-
98
- _default_val = float("nan") if handle_missing_data == "fill_nan" else 0
99
- self._defaults = {task: None if handle_missing_data == "fill_none" else
100
- tr.full(self.data_shape[task], _default_val) for task in self.task_names}
101
 
102
  # Public methods and properties
103
 
 
 
 
 
 
 
 
 
 
104
  @property
105
  def data_shape(self) -> dict[str, tuple[int, ...]]:
106
  """Returns a {task: shape_tuple} for all representations. At least one npz file must exist for each."""
@@ -214,7 +220,7 @@ class MultiTaskDataset(Dataset):
214
  for task in self.tasks:
215
  file_path = self.files_per_repr[task.name][index]
216
  file_path = None if file_path is None or not (fpr := file_path.resolve()).exists() else fpr
217
- res[task.name] = task.load_from_disk(file_path) if file_path is not None else self._defaults[task.name]
218
  return (res, item_name, self.task_names)
219
 
220
  def __len__(self) -> int:
 
70
  """
71
 
72
  def __init__(self, path: Path, task_names: list[str] | None = None, handle_missing_data: str = "fill_none",
73
+ files_suffix: str = "npz", task_types: dict[str, type] | None = None):
74
  assert Path(path).exists(), f"Provided path '{path}' doesn't exist!"
75
  assert handle_missing_data in ("drop", "fill_none", "fill_zero", "fill_nan"), \
76
  f"Invalid handle_missing_data mode: {handle_missing_data}"
 
94
  self._tasks: list[NpzRepresentation] | None = None
95
  self.name_to_task = {task.name: task for task in self.tasks}
96
  logger.info(f"Tasks used in this dataset: {self.task_names}")
97
+ self._default_vals: dict[str, tr.Tensor] | None = None
 
 
 
98
 
99
  # Public methods and properties
100
 
101
+ @property
102
+ def default_vals(self) -> dict[str, tr.Tensor]:
103
+ """default values for __getitem__ if item is not on disk but we retrieve a full batch anyway"""
104
+ if self._default_vals is None:
105
+ _default_val = float("nan") if self.handle_missing_data == "fill_nan" else 0
106
+ self._default_vals = {task: None if self.handle_missing_data == "fill_none" else
107
+ tr.full(self.data_shape[task], _default_val) for task in self.task_names}
108
+ return self._default_vals
109
+
110
  @property
111
  def data_shape(self) -> dict[str, tuple[int, ...]]:
112
  """Returns a {task: shape_tuple} for all representations. At least one npz file must exist for each."""
 
220
  for task in self.tasks:
221
  file_path = self.files_per_repr[task.name][index]
222
  file_path = None if file_path is None or not (fpr := file_path.resolve()).exists() else fpr
223
+ res[task.name] = task.load_from_disk(file_path) if file_path is not None else self.default_vals[task.name]
224
  return (res, item_name, self.task_names)
225
 
226
  def __len__(self) -> int:
scripts/evaluate_semantic_segmentation.py CHANGED
@@ -9,9 +9,10 @@ from loguru import logger
9
  from pathlib import Path
10
  from argparse import ArgumentParser, Namespace
11
  from tempfile import TemporaryDirectory
 
12
  from functools import partial
13
  from torchmetrics.functional.classification import multiclass_stat_scores
14
- from tqdm import trange
15
  import torch as tr
16
  import numpy as np
17
  import pandas as pd
@@ -34,16 +35,20 @@ def compute_metrics_by_class(df: pd.DataFrame, class_name: str) -> pd.DataFrame:
34
  df = df.fillna(0).round(3)
35
  return df
36
 
37
- def compute_raw_stats_per_frame(reader: MultiTaskDataset, classes: list[str]) -> pd.DataFrame:
 
 
 
 
 
 
38
  res = tr.zeros((len(reader), len(classes), 4)).long() # (N, NC, 4)
39
- index = []
40
- for i in trange(len(reader)):
41
- x = reader[i]
42
- y = x[0]["pred"].argmax(-1) if x[0]["pred"].dtype != tr.int64 else x[0]["pred"]
43
- gt = x[0]["gt"].argmax(-1) if x[0]["gt"].dtype != tr.int64 else x[0]["gt"]
44
- res[i] = multiclass_stat_scores(y, gt, num_classes=len(classes), average=None)[:, 0:4]
45
- index.append(x[1])
46
- res = res.reshape(len(reader) * len(classes), 4)
47
  df = pd.DataFrame(res, index=np.repeat(index, len(classes)), columns=["tp", "fp", "tn", "fn"])
48
  df.insert(0, "class_name", np.array(classes)[:, None].repeat(len(index), 1).T.flatten())
49
  return df
@@ -77,6 +82,7 @@ def get_args() -> Namespace:
77
  parser.add_argument("--class_weights", nargs="+", type=float)
78
  parser.add_argument("--scenes", nargs="+", default=["all"], help="each scene will get separate metrics if provided")
79
  parser.add_argument("--overwrite", action="store_true")
 
80
  args = parser.parse_args()
81
  if args.class_weights is None:
82
  logger.info("No class weights provided, defaulting to equal weights.")
@@ -88,6 +94,7 @@ def get_args() -> Namespace:
88
  logger.info(f"Scenes: {args.scenes}")
89
  if args.output_path.exists() and args.overwrite:
90
  os.remove(args.output_path)
 
91
  return args
92
 
93
  def main(args: Namespace):
@@ -98,7 +105,7 @@ def main(args: Namespace):
98
  assert (a := len(reader.all_files_per_repr["gt"])) == (b := len(reader.all_files_per_repr["pred"])), f"{a} vs {b}"
99
 
100
  # Compute TP, FP, TN, FN for each frame
101
- raw_stats = compute_raw_stats_per_frame(reader, args.classes)
102
  logger.info(f"Stored raw metrics file to: '{args.output_path}'")
103
  Path(args.output_path).parent.mkdir(exist_ok=True, parents=True)
104
  raw_stats.to_csv(args.output_path)
 
9
  from pathlib import Path
10
  from argparse import ArgumentParser, Namespace
11
  from tempfile import TemporaryDirectory
12
+ from multiprocessing import Pool
13
  from functools import partial
14
  from torchmetrics.functional.classification import multiclass_stat_scores
15
+ from tqdm import tqdm
16
  import torch as tr
17
  import numpy as np
18
  import pandas as pd
 
35
  df = df.fillna(0).round(3)
36
  return df
37
 
38
def _do_one(i: int, reader: MultiTaskDataset, num_classes: int) -> tuple[tr.Tensor, str]:
    """Compute the per-class (tp, fp, tn, fn) statistics for frame `i` of the reader.

    Returns a (num_classes, 4) tensor of stats plus the frame's item name.
    """
    data, name = reader[i][0:2]
    pred, gt = data["pred"], data["gt"]
    # Non-int64 entries are one-hot/probability maps: reduce them to class indices first.
    if pred.dtype != tr.int64:
        pred = pred.argmax(-1)
    if gt.dtype != tr.int64:
        gt = gt.argmax(-1)
    # multiclass_stat_scores yields (num_classes, 5): tp, fp, tn, fn, support -> drop support.
    stats = multiclass_stat_scores(pred, gt, num_classes=num_classes, average=None)
    return stats[:, 0:4], name
43
+
44
def compute_raw_stats_per_frame(reader: MultiTaskDataset, classes: list[str], n_workers: int = 1) -> pd.DataFrame:
    """Compute raw (tp, fp, tn, fn) confusion statistics for every frame and class of the reader.

    Parameters:
    - reader: dataset providing per-frame "pred"/"gt" tensors plus an item name
    - classes: list of class names; its length fixes num_classes
    - n_workers: 1 means in-process `map`; >1 fans frames out to a multiprocessing Pool

    Returns a DataFrame with len(reader) * len(classes) rows, indexed by the frame name
    (repeated once per class), with columns: class_name, tp, fp, tn, fn.
    """
    do_one_fn = partial(_do_one, reader=reader, num_classes=len(classes))
    if n_workers == 1:
        map_res = list(tqdm(map(do_one_fn, range(len(reader))), total=len(reader)))
    else:
        # Context manager guarantees the pool is closed and joined even if a worker raises;
        # the previous inline Pool(n_workers).imap leaked worker processes.
        with Pool(n_workers) as pool:
            map_res = list(tqdm(pool.imap(do_one_fn, range(len(reader))), total=len(reader)))
    # (N, NC, 4) -> (N * NC, 4) so each row is one (frame, class) pair.
    res = tr.stack([x[0] for x in map_res]).reshape(len(reader) * len(classes), 4)
    index = [x[1] for x in map_res]

    df = pd.DataFrame(res, index=np.repeat(index, len(classes)), columns=["tp", "fp", "tn", "fn"])
    df.insert(0, "class_name", np.array(classes)[:, None].repeat(len(index), 1).T.flatten())
    return df
 
82
  parser.add_argument("--class_weights", nargs="+", type=float)
83
  parser.add_argument("--scenes", nargs="+", default=["all"], help="each scene will get separate metrics if provided")
84
  parser.add_argument("--overwrite", action="store_true")
85
+ parser.add_argument("--n_workers", type=int, default=1)
86
  args = parser.parse_args()
87
  if args.class_weights is None:
88
  logger.info("No class weights provided, defaulting to equal weights.")
 
94
  logger.info(f"Scenes: {args.scenes}")
95
  if args.output_path.exists() and args.overwrite:
96
  os.remove(args.output_path)
97
+ assert args.n_workers >= 1 and isinstance(args.n_workers, int), args.n_workers
98
  return args
99
 
100
  def main(args: Namespace):
 
105
  assert (a := len(reader.all_files_per_repr["gt"])) == (b := len(reader.all_files_per_repr["pred"])), f"{a} vs {b}"
106
 
107
  # Compute TP, FP, TN, FN for each frame
108
+ raw_stats = compute_raw_stats_per_frame(reader, args.classes, args.n_workers)
109
  logger.info(f"Stored raw metrics file to: '{args.output_path}'")
110
  Path(args.output_path).parent.mkdir(exist_ok=True, parents=True)
111
  raw_stats.to_csv(args.output_path)