henry000 committed
Commit 3817f54 · 2 parent(s): 731ece0 fa548df

🔀 [Merge] branch 'main' into HF
yolo/tools/data_augmentation.py CHANGED
@@ -67,7 +67,7 @@ class PadAndResize:
         scale = min(self.target_width / img_width, self.target_height / img_height)
         new_width, new_height = int(img_width * scale), int(img_height * scale)
 
-        resized_image = image.resize((new_width, new_height), Image.LANCZOS)
+        resized_image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
 
         pad_left = (self.target_width - new_width) // 2
         pad_top = (self.target_height - new_height) // 2
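For context on the one-line change: since Pillow 9.1 the resampling filters live in the Image.Resampling enum, and the enum spelling is the forward-compatible one. Below is a minimal sketch of the letterbox resize-and-pad that PadAndResize performs, reconstructed from the hunk's context (the standalone function wrapper and the gray fill color are illustrative assumptions, not the repository's code):

from PIL import Image

def pad_and_resize(image: Image.Image, target_width: int, target_height: int) -> Image.Image:
    img_width, img_height = image.size
    # Scale so the image fits inside the target while keeping its aspect ratio.
    scale = min(target_width / img_width, target_height / img_height)
    new_width, new_height = int(img_width * scale), int(img_height * scale)
    # Image.Resampling.LANCZOS is the enum spelling introduced in Pillow 9.1.
    resized = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
    # Center the resized image on a fixed-size canvas (fill color is an assumption).
    canvas = Image.new("RGB", (target_width, target_height), (114, 114, 114))
    pad_left = (target_width - new_width) // 2
    pad_top = (target_height - new_height) // 2
    canvas.paste(resized, (pad_left, pad_top))
    return canvas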
yolo/tools/data_loader.py CHANGED
@@ -126,10 +126,12 @@ class YoloDataset(Dataset):
 
     def load_valid_labels(self, label_path: str, seg_data_one_img: list) -> Union[Tensor, None]:
         """
-        Loads and validates bounding box data is [0, 1] from a label file.
+        Loads valid COCO style segmentation data (values between [0, 1]) and converts it to bounding box coordinates
+        by finding the minimum and maximum x and y values.
 
         Parameters:
-            label_path (str): The filepath to the label file containing bounding box data.
+            label_path (str): The filepath to the label file containing annotation data.
+            seg_data_one_img (list): The actual list of annotations (in segmentation format)
 
         Returns:
             Tensor or None: A tensor of all valid bounding boxes if any are found; otherwise, None.
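The new docstring describes reducing a segmentation polygon to a box by taking the minimum and maximum of the vertex coordinates. A minimal sketch of that reduction (the helper name seg_to_bbox and the (N, 2) input shape are illustrative assumptions, not the repository's code):

import torch
from torch import Tensor

def seg_to_bbox(points: Tensor) -> Tensor:
    # points: an (N, 2) tensor of normalized polygon vertices in [0, 1].
    xy_min = points.min(dim=0).values   # (x_min, y_min)
    xy_max = points.max(dim=0).values   # (x_max, y_max)
    return torch.cat([xy_min, xy_max])  # (x_min, y_min, x_max, y_max)

polygon = torch.tensor([[0.2, 0.3], [0.6, 0.1], [0.5, 0.8]])
print(seg_to_bbox(polygon))  # tensor([0.2000, 0.1000, 0.6000, 0.8000])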
yolo/tools/solver.py CHANGED
@@ -48,17 +48,9 @@ class ValidateModel(BaseModel):
         batch_size, images, targets, rev_tensor, img_paths = batch
         H, W = images.shape[2:]
         predicts = self.post_process(self.ema(images), image_size=[W, H])
-        batch_metrics = self.metric(
+        self.metric.update(
             [to_metrics_format(predict) for predict in predicts], [to_metrics_format(target) for target in targets]
         )
-
-        self.log_dict(
-            {
-                "map": batch_metrics["map"],
-                "map_50": batch_metrics["map_50"],
-            },
-            batch_size=batch_size,
-        )
         return predicts
 
     def on_validation_epoch_end(self):
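Calling a torchmetrics metric object directly runs update and compute on every batch; the change accumulates state with update() and defers the full compute to on_validation_epoch_end, which is the usual torchmetrics pattern and avoids logging misleading per-batch mAP. A toy sketch of that accumulate-then-compute pattern (assuming the metric is torchmetrics' MeanAveragePrecision, which the map/map_50 keys suggest):

import torch
from torchmetrics.detection import MeanAveragePrecision

metric = MeanAveragePrecision()

# Per batch: update() only accumulates internal state, which is cheap.
preds = [{"boxes": torch.tensor([[10.0, 10.0, 50.0, 50.0]]),
          "scores": torch.tensor([0.9]),
          "labels": torch.tensor([0])}]
target = [{"boxes": torch.tensor([[12.0, 11.0, 48.0, 52.0]]),
           "labels": torch.tensor([0])}]
metric.update(preds, target)

# Once per epoch: compute() runs the expensive COCO-style evaluation.
results = metric.compute()
print(results["map"], results["map_50"])
metric.reset()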
yolo/utils/bounding_box_utils.py CHANGED
@@ -484,6 +484,7 @@ def calculate_map(predictions, ground_truths) -> Dict[str, Tensor]:
 
 
 def to_metrics_format(prediction: Tensor) -> Dict[str, Union[float, Tensor]]:
+    prediction = prediction[prediction[:, 0] != -1]
     bbox = {"boxes": prediction[:, 1:5], "labels": prediction[:, 0].int()}
     if prediction.size(1) == 6:
        bbox["scores"] = prediction[:, 5]
yolo/utils/dataset_utils.py CHANGED
@@ -104,7 +104,9 @@ def scale_segmentation(
         if "segmentation" in anno:
             seg_list = [item for sublist in anno["segmentation"] for item in sublist]
         elif "bbox" in anno:
-            seg_list = anno["bbox"]
+            x, y, width, height = anno["bbox"]
+            seg_list = [x, y, x + width, y, x + width, y + height, x, y + height]
+
         scaled_seg_data = (
             np.array(seg_list).reshape(-1, 2) / [w, h]
         ).tolist()  # make the list group in x, y pairs and scaled with image width, height
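Previously a bbox-only annotation fed the raw COCO [x, y, width, height] 4-tuple into reshape(-1, 2), which misread it as two (x, y) points; the fix expands the box into its four corner vertices so it scales like any other polygon. A worked toy example (numbers are illustrative):

import numpy as np

anno_bbox = [10, 20, 30, 40]  # COCO format: x, y, width, height
x, y, width, height = anno_bbox
# Four corners, clockwise from the top-left, flattened as x1, y1, x2, y2, ...
seg_list = [x, y, x + width, y, x + width, y + height, x, y + height]

w, h = 100, 200  # image width and height
scaled = (np.array(seg_list).reshape(-1, 2) / [w, h]).tolist()
print(scaled)  # [[0.1, 0.1], [0.4, 0.1], [0.4, 0.3], [0.1, 0.3]]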